2024-12-08 00:19:34,484 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-08 00:19:34,496 main DEBUG Took 0.010187 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 00:19:34,496 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 00:19:34,496 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 00:19:34,497 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 00:19:34,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,505 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 00:19:34,515 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,517 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,517 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,518 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,518 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,519 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,519 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,520 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,520 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,520 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,521 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,521 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,522 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,522 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 00:19:34,522 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,523 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,523 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,523 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,524 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,524 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,524 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,525 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,525 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,525 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:19:34,526 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,526 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 00:19:34,527 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:19:34,529 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 00:19:34,530 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 00:19:34,531 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 00:19:34,532 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 00:19:34,532 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 00:19:34,540 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 00:19:34,542 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 00:19:34,544 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 00:19:34,544 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 00:19:34,544 main DEBUG createAppenders(={Console}) 2024-12-08 00:19:34,545 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-08 00:19:34,545 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-08 00:19:34,545 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-08 00:19:34,546 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 00:19:34,546 main DEBUG OutputStream closed 2024-12-08 00:19:34,546 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 00:19:34,547 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 00:19:34,547 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-08 00:19:34,613 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 00:19:34,615 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 00:19:34,616 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 00:19:34,617 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 00:19:34,617 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 00:19:34,617 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 00:19:34,618 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 00:19:34,618 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 00:19:34,618 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 00:19:34,618 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 00:19:34,619 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 00:19:34,619 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 00:19:34,619 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 00:19:34,619 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 00:19:34,620 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 00:19:34,620 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 00:19:34,620 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 00:19:34,621 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 00:19:34,623 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 00:19:34,624 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-08 00:19:34,624 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 00:19:34,625 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-08T00:19:34,862 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4 2024-12-08 00:19:34,864 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 00:19:34,865 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
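The Log4j 2 initialization above ends with the LoggerContext started against the PatternLayout %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, which is what renders every timestamped entry that follows. A minimal sketch, assuming that same configuration is on the classpath, of emitting one such entry through the SLF4J facade; the logger name is taken from the configuration above and the message is illustrative only:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
  public static void main(String[] args) {
    // The configuration above sets org.apache.hadoop.hbase.logging.TestJul2Slf4j to DEBUG,
    // so a debug statement on that logger passes the level check and is rendered by the
    // PatternLayout roughly as:
    //   2024-12-08 00:19:34,xxx DEBUG [main {}] LoggingSketch(<line>): hello from the test appender
    Logger log = LoggerFactory.getLogger("org.apache.hadoop.hbase.logging.TestJul2Slf4j");
    log.debug("hello from the test appender");
  }
}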
2024-12-08T00:19:34,873 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-08T00:19:34,891 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:19:34,894 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54, deleteOnExit=true 2024-12-08T00:19:34,894 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-08T00:19:34,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/test.cache.data in system properties and HBase conf 2024-12-08T00:19:34,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:19:34,896 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:19:34,897 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:19:34,897 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:19:34,897 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-08T00:19:34,989 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T00:19:35,094 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:19:35,099 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:19:35,100 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:19:35,100 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:19:35,101 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:19:35,102 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:19:35,102 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:19:35,103 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:19:35,103 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:19:35,104 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:19:35,105 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:19:35,105 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:19:35,106 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:19:35,106 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:19:35,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:19:35,993 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T00:19:36,085 INFO [Time-limited test {}] log.Log(170): Logging initialized @2482ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T00:19:36,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:19:36,236 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:19:36,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:19:36,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:19:36,261 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:19:36,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:19:36,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:19:36,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:19:36,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/java.io.tmpdir/jetty-localhost-46119-hadoop-hdfs-3_4_1-tests_jar-_-any-5661126660622246604/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:19:36,494 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:46119} 2024-12-08T00:19:36,494 INFO [Time-limited test {}] server.Server(415): Started @2892ms 2024-12-08T00:19:36,880 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:19:36,887 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:19:36,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:19:36,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:19:36,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:19:36,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:19:36,891 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:19:37,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/java.io.tmpdir/jetty-localhost-37993-hadoop-hdfs-3_4_1-tests_jar-_-any-16580936048094820653/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:19:37,011 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:37993} 2024-12-08T00:19:37,011 INFO [Time-limited test {}] server.Server(415): Started @3409ms 2024-12-08T00:19:37,067 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:19:37,586 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data1/current/BP-1532074142-172.17.0.2-1733617175722/current, will proceed with Du for space computation calculation, 2024-12-08T00:19:37,586 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data2/current/BP-1532074142-172.17.0.2-1733617175722/current, will proceed with Du for space computation calculation, 2024-12-08T00:19:37,636 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:19:37,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fd8218e47585305 with lease ID 0x5378411b215e5b27: Processing first storage report for DS-73f1f6d7-b1cd-475e-8dda-79b8d066a218 from datanode DatanodeRegistration(127.0.0.1:42385, datanodeUuid=ec2b63d5-2d4b-41e9-8e1d-ba3cd3db80f9, infoPort=42901, infoSecurePort=0, ipcPort=46597, storageInfo=lv=-57;cid=testClusterID;nsid=1000942164;c=1733617175722) 2024-12-08T00:19:37,693 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fd8218e47585305 with lease ID 0x5378411b215e5b27: from storage DS-73f1f6d7-b1cd-475e-8dda-79b8d066a218 node DatanodeRegistration(127.0.0.1:42385, datanodeUuid=ec2b63d5-2d4b-41e9-8e1d-ba3cd3db80f9, infoPort=42901, infoSecurePort=0, ipcPort=46597, storageInfo=lv=-57;cid=testClusterID;nsid=1000942164;c=1733617175722), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:19:37,694 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fd8218e47585305 with lease ID 0x5378411b215e5b27: Processing first storage report for DS-e8c44dc7-608c-4ee3-9eef-129d68d7a1d9 from datanode DatanodeRegistration(127.0.0.1:42385, datanodeUuid=ec2b63d5-2d4b-41e9-8e1d-ba3cd3db80f9, infoPort=42901, infoSecurePort=0, ipcPort=46597, storageInfo=lv=-57;cid=testClusterID;nsid=1000942164;c=1733617175722) 2024-12-08T00:19:37,694 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fd8218e47585305 with lease ID 0x5378411b215e5b27: from storage DS-e8c44dc7-608c-4ee3-9eef-129d68d7a1d9 node DatanodeRegistration(127.0.0.1:42385, datanodeUuid=ec2b63d5-2d4b-41e9-8e1d-ba3cd3db80f9, infoPort=42901, infoSecurePort=0, ipcPort=46597, storageInfo=lv=-57;cid=testClusterID;nsid=1000942164;c=1733617175722), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:19:37,708 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4 
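At this point HDFS is up (the single datanode has registered its storage with the namenode) and the test goes on to start ZooKeeper and the HBase master and regionserver. The option string logged earlier, StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1, ...}, corresponds roughly to the following use of the HBase 2.x testing API; this is a sketch for orientation under those assumptions, not the actual code of TestAcidGuaranteesWithAdaptivePolicy:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged options
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(1)      // numDataNodes=1
        .numZkServers(1)      // numZkServers=1
        .build();
    util.startMiniCluster(option);  // brings up DFS, ZooKeeper, master and regionserver as in this log
    try {
      // test body would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}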
2024-12-08T00:19:37,782 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/zookeeper_0, clientPort=62287, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:19:37,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62287 2024-12-08T00:19:37,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:37,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:38,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:19:38,457 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 with version=8 2024-12-08T00:19:38,457 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/hbase-staging 2024-12-08T00:19:38,584 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T00:19:38,858 INFO [Time-limited test {}] client.ConnectionUtils(129): master/017dd09fb407:0 server-side Connection retries=45 2024-12-08T00:19:38,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:38,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:38,879 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:19:38,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:38,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:19:39,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:19:39,090 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T00:19:39,100 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T00:19:39,104 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:19:39,133 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8279 (auto-detected) 2024-12-08T00:19:39,134 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T00:19:39,155 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44717 2024-12-08T00:19:39,164 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:39,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:39,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:44717 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-08T00:19:39,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:447170x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:19:39,213 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44717-0x1006efe8d8b0000 connected 2024-12-08T00:19:39,245 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:19:39,248 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:19:39,251 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:19:39,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44717 2024-12-08T00:19:39,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44717 2024-12-08T00:19:39,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44717 2024-12-08T00:19:39,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44717 2024-12-08T00:19:39,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44717 
2024-12-08T00:19:39,266 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3, hbase.cluster.distributed=false 2024-12-08T00:19:39,336 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/017dd09fb407:0 server-side Connection retries=45 2024-12-08T00:19:39,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:39,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:39,337 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:19:39,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:19:39,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:19:39,340 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:19:39,343 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:19:39,344 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36703 2024-12-08T00:19:39,346 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:19:39,354 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:19:39,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:39,359 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:39,364 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36703 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-08T00:19:39,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367030x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:19:39,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36703-0x1006efe8d8b0001 connected 2024-12-08T00:19:39,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:19:39,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36703-0x1006efe8d8b0001, 
quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:19:39,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:19:39,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36703 2024-12-08T00:19:39,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36703 2024-12-08T00:19:39,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36703 2024-12-08T00:19:39,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36703 2024-12-08T00:19:39,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36703 2024-12-08T00:19:39,380 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/017dd09fb407,44717,1733617178577 2024-12-08T00:19:39,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:19:39,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:19:39,389 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/017dd09fb407,44717,1733617178577 2024-12-08T00:19:39,396 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;017dd09fb407:44717 2024-12-08T00:19:39,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:19:39,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:19:39,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:39,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:39,412 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:19:39,412 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:19:39,413 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/017dd09fb407,44717,1733617178577 from backup master directory 2024-12-08T00:19:39,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/017dd09fb407,44717,1733617178577 2024-12-08T00:19:39,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:19:39,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:19:39,416 WARN [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:19:39,416 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=017dd09fb407,44717,1733617178577 2024-12-08T00:19:39,419 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T00:19:39,420 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T00:19:39,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:19:39,901 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/hbase.id with ID: a47f801a-c3bd-4432-bfde-d2bdba82664b 2024-12-08T00:19:39,943 INFO [master/017dd09fb407:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:19:39,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:39,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:39,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:19:40,004 INFO [master/017dd09fb407:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:19:40,008 INFO [master/017dd09fb407:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:19:40,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:40,031 INFO [master/017dd09fb407:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:19:40,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:19:40,082 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store 2024-12-08T00:19:40,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:19:40,504 INFO [master/017dd09fb407:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
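The 'master:store' descriptor being created above lists four column families (info, proc, rs, state) together with their attributes. As a hypothetical illustration of how the 'info' family's attributes map onto the HBase 2.x descriptor-builder API (the master builds this region internally, so no test code actually constructs it this way):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreInfoFamilySketch {
  public static void main(String[] args) {
    // Rough equivalent of the 'info' family shown in the 'master:store' descriptor above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    System.out.println(info);
  }
}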
2024-12-08T00:19:40,505 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:40,506 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:19:40,506 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:19:40,507 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:19:40,507 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:19:40,507 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:19:40,507 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:19:40,507 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-08T00:19:40,510 WARN [master/017dd09fb407:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/.initializing 2024-12-08T00:19:40,510 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/WALs/017dd09fb407,44717,1733617178577 2024-12-08T00:19:40,518 INFO [master/017dd09fb407:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T00:19:40,529 INFO [master/017dd09fb407:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=017dd09fb407%2C44717%2C1733617178577, suffix=, logDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/WALs/017dd09fb407,44717,1733617178577, archiveDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/oldWALs, maxLogs=10 2024-12-08T00:19:40,551 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/WALs/017dd09fb407,44717,1733617178577/017dd09fb407%2C44717%2C1733617178577.1733617180534, exclude list is [], retry=0 2024-12-08T00:19:40,568 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42385,DS-73f1f6d7-b1cd-475e-8dda-79b8d066a218,DISK] 2024-12-08T00:19:40,571 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-08T00:19:40,611 INFO [master/017dd09fb407:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/WALs/017dd09fb407,44717,1733617178577/017dd09fb407%2C44717%2C1733617178577.1733617180534 2024-12-08T00:19:40,612 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42901:42901)] 2024-12-08T00:19:40,613 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:19:40,613 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:40,616 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,617 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:19:40,687 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:40,690 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:40,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:19:40,695 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:40,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:40,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:19:40,700 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:40,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:40,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:19:40,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:40,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:40,709 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,710 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,719 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:19:40,722 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:19:40,727 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:19:40,728 INFO [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73504915, jitterRate=0.0953085869550705}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:19:40,733 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-08T00:19:40,734 INFO [master/017dd09fb407:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:19:40,762 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47113034, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:40,797 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
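The FlushLargeStoresPolicy line above falls back to region.getMemStoreFlushHeapSize divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on master:store. A minimal sketch of that arithmetic, using only the values reported in the log (flushSize=134217728 and the four families info, proc, rs, state):

```java
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L; // 128 MB, the flushSize reported above
    int columnFamilies = 4;                // info, proc, rs, state
    long lowerBound = memstoreFlushSize / columnFamilies;
    // Prints 33554432 (32 MB), matching FlushLargeStoresPolicy{flushSizeLowerBound=33554432}.
    System.out.println(lowerBound);
  }
}
```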
2024-12-08T00:19:40,808 INFO [master/017dd09fb407:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:19:40,809 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:19:40,811 INFO [master/017dd09fb407:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:19:40,812 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T00:19:40,817 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-08T00:19:40,817 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:19:40,842 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:19:40,854 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:19:40,857 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:19:40,859 INFO [master/017dd09fb407:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:19:40,860 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:19:40,862 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:19:40,864 INFO [master/017dd09fb407:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:19:40,867 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:19:40,868 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:19:40,869 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:19:40,871 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:19:40,880 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:19:40,882 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:19:40,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:19:40,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:19:40,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,887 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=017dd09fb407,44717,1733617178577, sessionid=0x1006efe8d8b0000, setting cluster-up flag (Was=false) 2024-12-08T00:19:40,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,904 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:19:40,905 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=017dd09fb407,44717,1733617178577 2024-12-08T00:19:40,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:40,916 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:19:40,918 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=017dd09fb407,44717,1733617178577 2024-12-08T00:19:40,999 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;017dd09fb407:36703 2024-12-08T00:19:41,000 INFO 
[RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1008): ClusterId : a47f801a-c3bd-4432-bfde-d2bdba82664b 2024-12-08T00:19:41,003 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:19:41,007 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-08T00:19:41,008 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:19:41,009 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:19:41,011 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:19:41,012 DEBUG [RS:0;017dd09fb407:36703 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5985460a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:41,014 DEBUG [RS:0;017dd09fb407:36703 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@220cbbc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=017dd09fb407/172.17.0.2:0 2024-12-08T00:19:41,014 INFO [master/017dd09fb407:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-08T00:19:41,016 INFO [master/017dd09fb407:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:19:41,016 INFO [RS:0;017dd09fb407:36703 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-08T00:19:41,017 INFO [RS:0;017dd09fb407:36703 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-08T00:19:41,017 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1090): About to register with Master. 
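The AbstractRpcClient lines above print connectTO=10000, readTO=20000 and writeTO=60000. A hedged sketch of setting those socket timeouts on a client connection; the hbase.ipc.client.socket.timeout.* keys are assumed from common HBase documentation, and a reachable cluster is required for the connect to succeed:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcTimeoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys behind the connectTO/readTO/writeTO values printed above.
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);
    conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);
    conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}
```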
2024-12-08T00:19:41,019 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(3073): reportForDuty to master=017dd09fb407,44717,1733617178577 with isa=017dd09fb407/172.17.0.2:36703, startcode=1733617179335 2024-12-08T00:19:41,022 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 017dd09fb407,44717,1733617178577 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:19:41,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/017dd09fb407:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:19:41,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/017dd09fb407:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:19:41,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/017dd09fb407:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:19:41,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/017dd09fb407:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:19:41,026 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/017dd09fb407:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:19:41,027 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,027 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/017dd09fb407:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:19:41,027 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,028 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733617211028 2024-12-08T00:19:41,030 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:19:41,031 DEBUG [RS:0;017dd09fb407:36703 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:19:41,032 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:19:41,033 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:19:41,033 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:19:41,036 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:19:41,036 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:19:41,036 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:19:41,037 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:19:41,037 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,038 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,038 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:19:41,038 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:19:41,039 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:19:41,040 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:19:41,047 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:19:41,047 INFO [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:19:41,049 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.large.0-1733617181048,5,FailOnTimeoutGroup] 2024-12-08T00:19:41,049 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.small.0-1733617181049,5,FailOnTimeoutGroup] 2024-12-08T00:19:41,050 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,050 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:19:41,051 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,051 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741831_1007 (size=1039) 2024-12-08T00:19:41,056 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-08T00:19:41,056 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:19:41,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:19:41,070 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56135, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:19:41,075 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44717 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,078 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44717 {}] master.ServerManager(486): 
Registering regionserver=017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,092 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:19:41,092 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46183 2024-12-08T00:19:41,092 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-08T00:19:41,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:19:41,097 DEBUG [RS:0;017dd09fb407:36703 {}] zookeeper.ZKUtil(111): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,097 WARN [RS:0;017dd09fb407:36703 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:19:41,097 INFO [RS:0;017dd09fb407:36703 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:19:41,097 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,099 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [017dd09fb407,36703,1733617179335] 2024-12-08T00:19:41,113 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-08T00:19:41,128 INFO [RS:0;017dd09fb407:36703 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:19:41,145 INFO [RS:0;017dd09fb407:36703 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:19:41,149 INFO [RS:0;017dd09fb407:36703 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:19:41,149 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,150 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-08T00:19:41,159 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
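The PressureAwareCompactionThroughputController above reports a 50-100 MB/second throughput window with a 60000 ms tuning period. A sketch that reads the corresponding limits back from a Configuration; the property keys are assumptions based on the usual HBase compaction-throughput settings, not taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys behind "higher bound: 100.00 MB/second, lower bound 50.00 MB/second" above.
    long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    long tunePeriodMs = conf.getLong("hbase.hstore.compaction.throughput.tune.period", 60_000L);
    System.out.printf("compaction throughput: %d-%d bytes/s, tuned every %d ms%n",
        lower, higher, tunePeriodMs);
  }
}
```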
2024-12-08T00:19:41,159 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,159 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,159 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,159 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,159 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/017dd09fb407:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,160 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/017dd09fb407:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:19:41,161 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/017dd09fb407:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:19:41,161 DEBUG [RS:0;017dd09fb407:36703 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:19:41,161 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,162 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,162 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,162 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,162 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,36703,1733617179335-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-08T00:19:41,181 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:19:41,183 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,36703,1733617179335-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:41,202 INFO [RS:0;017dd09fb407:36703 {}] regionserver.Replication(204): 017dd09fb407,36703,1733617179335 started 2024-12-08T00:19:41,202 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1767): Serving as 017dd09fb407,36703,1733617179335, RpcServer on 017dd09fb407/172.17.0.2:36703, sessionid=0x1006efe8d8b0001 2024-12-08T00:19:41,203 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:19:41,203 DEBUG [RS:0;017dd09fb407:36703 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,203 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '017dd09fb407,36703,1733617179335' 2024-12-08T00:19:41,203 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:19:41,204 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '017dd09fb407,36703,1733617179335' 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:19:41,205 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:19:41,206 DEBUG [RS:0;017dd09fb407:36703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:19:41,206 INFO [RS:0;017dd09fb407:36703 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:19:41,206 INFO [RS:0;017dd09fb407:36703 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
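Both quota managers above start disabled. A small sketch of checking the flag they key off; hbase.quota.enabled is the commonly documented switch and is assumed here rather than read from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaFlagSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "Quota support disabled" above corresponds to this flag being false (its default).
    boolean quotasEnabled = conf.getBoolean("hbase.quota.enabled", false);
    System.out.println("hbase.quota.enabled=" + quotasEnabled);
  }
}
```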
2024-12-08T00:19:41,312 INFO [RS:0;017dd09fb407:36703 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T00:19:41,316 INFO [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=017dd09fb407%2C36703%2C1733617179335, suffix=, logDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335, archiveDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/oldWALs, maxLogs=32 2024-12-08T00:19:41,333 DEBUG [RS:0;017dd09fb407:36703 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335/017dd09fb407%2C36703%2C1733617179335.1733617181318, exclude list is [], retry=0 2024-12-08T00:19:41,338 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42385,DS-73f1f6d7-b1cd-475e-8dda-79b8d066a218,DISK] 2024-12-08T00:19:41,341 INFO [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335/017dd09fb407%2C36703%2C1733617179335.1733617181318 2024-12-08T00:19:41,341 DEBUG [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42901:42901)] 2024-12-08T00:19:41,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:41,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:19:41,474 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:19:41,475 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:19:41,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:19:41,478 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:19:41,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:19:41,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740 2024-12-08T00:19:41,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740 2024-12-08T00:19:41,488 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:19:41,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-08T00:19:41,494 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:19:41,495 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65058608, jitterRate=-0.03055119514465332}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:19:41,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-08T00:19:41,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:19:41,497 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-08T00:19:41,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-08T00:19:41,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:19:41,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:19:41,499 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-08T00:19:41,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-08T00:19:41,501 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:19:41,501 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-08T00:19:41,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:19:41,516 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:19:41,518 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:19:41,669 DEBUG [017dd09fb407:44717 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T00:19:41,674 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,679 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 017dd09fb407,36703,1733617179335, state=OPENING 2024-12-08T00:19:41,685 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:19:41,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:41,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:41,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:19:41,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:19:41,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:19:41,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:41,867 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:19:41,871 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:19:41,882 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-08T00:19:41,882 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:19:41,882 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T00:19:41,886 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=017dd09fb407%2C36703%2C1733617179335.meta, suffix=.meta, logDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335, archiveDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/oldWALs, maxLogs=32 2024-12-08T00:19:41,903 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335/017dd09fb407%2C36703%2C1733617179335.meta.1733617181888.meta, exclude list is [], retry=0 2024-12-08T00:19:41,907 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42385,DS-73f1f6d7-b1cd-475e-8dda-79b8d066a218,DISK] 2024-12-08T00:19:41,910 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/WALs/017dd09fb407,36703,1733617179335/017dd09fb407%2C36703%2C1733617179335.meta.1733617181888.meta 2024-12-08T00:19:41,910 DEBUG 
[RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42901:42901)] 2024-12-08T00:19:41,911 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:19:41,912 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:19:41,971 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:19:41,976 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T00:19:41,981 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:19:41,981 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:41,981 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-08T00:19:41,981 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-08T00:19:41,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:19:41,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:19:41,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:19:41,989 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:19:41,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:19:41,991 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:19:41,991 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:41,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:19:41,993 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740 2024-12-08T00:19:41,996 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740 2024-12-08T00:19:41,999 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:19:42,002 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-08T00:19:42,003 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71308938, jitterRate=0.0625859797000885}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:19:42,005 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-08T00:19:42,012 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733617181859 2024-12-08T00:19:42,023 DEBUG [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:19:42,024 INFO [RS_OPEN_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-08T00:19:42,025 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:42,026 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 017dd09fb407,36703,1733617179335, state=OPEN 2024-12-08T00:19:42,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:19:42,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:19:42,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:19:42,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:19:42,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:19:42,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=017dd09fb407,36703,1733617179335 in 342 msec 2024-12-08T00:19:42,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:19:42,041 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 529 msec 2024-12-08T00:19:42,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0920 sec 2024-12-08T00:19:42,046 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733617182046, completionTime=-1 2024-12-08T00:19:42,046 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T00:19:42,047 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-08T00:19:42,086 DEBUG [hconnection-0x4da31e77-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:42,088 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:42,098 INFO [master/017dd09fb407:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-08T00:19:42,098 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733617242098 2024-12-08T00:19:42,099 INFO [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733617302099 2024-12-08T00:19:42,099 INFO [master/017dd09fb407:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-08T00:19:42,120 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:42,121 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:42,121 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:42,123 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-017dd09fb407:44717, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:42,123 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:42,127 DEBUG [master/017dd09fb407:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-08T00:19:42,131 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-08T00:19:42,133 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:19:42,140 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-08T00:19:42,143 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:19:42,144 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:42,146 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:19:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741835_1011 (size=358) 2024-12-08T00:19:42,561 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4a133571fbb9d65d8cbb8c5be599e94a, NAME => 'hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:19:42,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741836_1012 (size=42) 2024-12-08T00:19:42,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:42,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4a133571fbb9d65d8cbb8c5be599e94a, disabling compactions & flushes 2024-12-08T00:19:42,972 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:42,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:42,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 
after waiting 0 ms 2024-12-08T00:19:42,973 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:42,973 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:42,973 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4a133571fbb9d65d8cbb8c5be599e94a: 2024-12-08T00:19:42,976 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:19:42,984 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733617182977"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617182977"}]},"ts":"1733617182977"} 2024-12-08T00:19:43,020 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:19:43,022 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:19:43,026 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617183023"}]},"ts":"1733617183023"} 2024-12-08T00:19:43,032 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-08T00:19:43,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4a133571fbb9d65d8cbb8c5be599e94a, ASSIGN}] 2024-12-08T00:19:43,042 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4a133571fbb9d65d8cbb8c5be599e94a, ASSIGN 2024-12-08T00:19:43,043 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4a133571fbb9d65d8cbb8c5be599e94a, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:19:43,194 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4a133571fbb9d65d8cbb8c5be599e94a, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:43,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4a133571fbb9d65d8cbb8c5be599e94a, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:19:43,386 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:43,395 INFO [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:43,396 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4a133571fbb9d65d8cbb8c5be599e94a, NAME => 'hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:19:43,396 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,396 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:43,397 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,397 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,402 INFO [StoreOpener-4a133571fbb9d65d8cbb8c5be599e94a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,404 INFO [StoreOpener-4a133571fbb9d65d8cbb8c5be599e94a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a133571fbb9d65d8cbb8c5be599e94a columnFamilyName info 2024-12-08T00:19:43,404 DEBUG [StoreOpener-4a133571fbb9d65d8cbb8c5be599e94a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:43,405 INFO [StoreOpener-4a133571fbb9d65d8cbb8c5be599e94a-1 {}] regionserver.HStore(327): Store=4a133571fbb9d65d8cbb8c5be599e94a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:43,408 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,409 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,416 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:19:43,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:19:43,426 INFO [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4a133571fbb9d65d8cbb8c5be599e94a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73618146, jitterRate=0.09699586033821106}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:19:43,427 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4a133571fbb9d65d8cbb8c5be599e94a: 2024-12-08T00:19:43,430 INFO [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a., pid=6, masterSystemTime=1733617183386 2024-12-08T00:19:43,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:43,435 INFO [RS_OPEN_PRIORITY_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:19:43,437 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4a133571fbb9d65d8cbb8c5be599e94a, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:43,439 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44717 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=017dd09fb407,36703,1733617179335, table=hbase:namespace, region=4a133571fbb9d65d8cbb8c5be599e94a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-08T00:19:43,446 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:19:43,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4a133571fbb9d65d8cbb8c5be599e94a, server=017dd09fb407,36703,1733617179335 in 212 msec 2024-12-08T00:19:43,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:19:43,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4a133571fbb9d65d8cbb8c5be599e94a, ASSIGN in 408 msec 2024-12-08T00:19:43,452 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:19:43,453 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617183452"}]},"ts":"1733617183452"} 2024-12-08T00:19:43,455 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-08T00:19:43,459 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:19:43,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3250 sec 2024-12-08T00:19:43,544 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-08T00:19:43,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-08T00:19:43,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:43,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:19:43,582 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-08T00:19:43,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-08T00:19:43,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 31 msec 2024-12-08T00:19:43,618 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; 
CreateNamespaceProcedure, namespace=hbase 2024-12-08T00:19:43,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-08T00:19:43,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 22 msec 2024-12-08T00:19:43,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-08T00:19:43,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-08T00:19:43,663 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.246sec 2024-12-08T00:19:43,665 INFO [master/017dd09fb407:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:19:43,667 INFO [master/017dd09fb407:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:19:43,668 INFO [master/017dd09fb407:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:19:43,669 INFO [master/017dd09fb407:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T00:19:43,669 INFO [master/017dd09fb407:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:19:43,670 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:19:43,671 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:19:43,681 DEBUG [master/017dd09fb407:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:19:43,683 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:19:43,683 INFO [master/017dd09fb407:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=017dd09fb407,44717,1733617178577-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:19:43,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-12-08T00:19:43,704 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-08T00:19:43,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:43,725 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T00:19:43,726 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T00:19:43,744 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:43,757 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:43,771 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=017dd09fb407,44717,1733617178577 2024-12-08T00:19:43,795 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=219, ProcessCount=11, AvailableMemoryMB=8951 2024-12-08T00:19:43,811 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:19:43,815 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:19:43,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-08T00:19:43,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:19:43,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:19:43,836 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:19:43,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-08T00:19:43,837 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:43,839 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:19:43,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T00:19:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741837_1013 (size=963) 2024-12-08T00:19:43,871 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:19:43,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741838_1014 (size=53) 2024-12-08T00:19:43,895 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:43,895 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f51bdc360ee4fbe2f9447c9b6b4bf1ce, disabling compactions & flushes 2024-12-08T00:19:43,895 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:43,895 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:43,895 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. after waiting 0 ms 2024-12-08T00:19:43,895 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:43,895 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:43,896 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:43,899 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:19:43,899 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617183899"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617183899"}]},"ts":"1733617183899"} 2024-12-08T00:19:43,903 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-08T00:19:43,905 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:19:43,906 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617183905"}]},"ts":"1733617183905"} 2024-12-08T00:19:43,909 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:19:43,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, ASSIGN}] 2024-12-08T00:19:43,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, ASSIGN 2024-12-08T00:19:43,919 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:19:43,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T00:19:44,069 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f51bdc360ee4fbe2f9447c9b6b4bf1ce, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:19:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T00:19:44,227 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,237 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:44,237 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:19:44,238 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,238 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:19:44,238 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,238 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,242 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,249 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:19:44,249 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f51bdc360ee4fbe2f9447c9b6b4bf1ce columnFamilyName A 2024-12-08T00:19:44,249 DEBUG [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:44,251 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(327): Store=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:44,251 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,255 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:19:44,256 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f51bdc360ee4fbe2f9447c9b6b4bf1ce columnFamilyName B 2024-12-08T00:19:44,256 DEBUG [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:44,257 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(327): Store=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:44,258 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,260 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:19:44,261 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f51bdc360ee4fbe2f9447c9b6b4bf1ce columnFamilyName C 2024-12-08T00:19:44,261 DEBUG [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:19:44,262 INFO [StoreOpener-f51bdc360ee4fbe2f9447c9b6b4bf1ce-1 {}] regionserver.HStore(327): Store=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:19:44,262 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:44,264 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,265 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,268 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:19:44,271 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,279 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:19:44,280 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened f51bdc360ee4fbe2f9447c9b6b4bf1ce; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70286531, jitterRate=0.0473509281873703}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:19:44,282 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:44,283 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., pid=11, masterSystemTime=1733617184227 2024-12-08T00:19:44,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f51bdc360ee4fbe2f9447c9b6b4bf1ce, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,290 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:44,290 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:44,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-08T00:19:44,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 in 222 msec 2024-12-08T00:19:44,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-08T00:19:44,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, ASSIGN in 385 msec 2024-12-08T00:19:44,305 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:19:44,306 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617184305"}]},"ts":"1733617184305"} 2024-12-08T00:19:44,309 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:19:44,314 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:19:44,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 485 msec 2024-12-08T00:19:44,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T00:19:44,459 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-08T00:19:44,463 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-12-08T00:19:44,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,473 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:19:44,480 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:19:44,499 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:62287 with session timeout=90000ms, 
retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-08T00:19:44,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,507 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-12-08T00:19:44,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-08T00:19:44,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,529 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-12-08T00:19:44,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,541 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-12-08T00:19:44,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-08T00:19:44,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,567 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x490457fd to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-12-08T00:19:44,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-08T00:19:44,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-12-08T00:19:44,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:19:44,608 DEBUG [hconnection-0x53f1d8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,609 DEBUG [hconnection-0x703098f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,615 DEBUG [hconnection-0x4360c845-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,618 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,626 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,627 DEBUG [hconnection-0x42186a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,628 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,631 DEBUG [hconnection-0x2dd15ceb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,638 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,646 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52366, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:44,696 DEBUG [hconnection-0x11fa2cd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,647 DEBUG [hconnection-0x7b5d524b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,707 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-08T00:19:44,713 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:44,714 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:44,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:44,718 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:44,734 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,738 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52410, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,741 DEBUG [hconnection-0x19e0dde4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:19:44,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:44,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:44,763 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:19:44,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:44,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-08T00:19:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:44,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:44,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:44,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:44,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a2ccce83506a44f0880725901a10cce4 is 50, key is test_row_0/A:col10/1733617184749/Put/seqid=0 2024-12-08T00:19:44,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:44,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:44,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:44,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:44,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:44,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:44,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:44,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617244951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:44,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617244953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:44,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617244954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741839_1015 (size=12001) 2024-12-08T00:19:44,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a2ccce83506a44f0880725901a10cce4 2024-12-08T00:19:44,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:44,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617244967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:44,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617244970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:45,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617245109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617245110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617245110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617245112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,116 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617245115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/34263715e7514fa3a03d6d00b493a229 is 50, key is test_row_0/B:col10/1733617184749/Put/seqid=0 2024-12-08T00:19:45,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:45,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741840_1016 (size=12001) 2024-12-08T00:19:45,289 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617245324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617245321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617245321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617245324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617245326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:45,479 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:45,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/34263715e7514fa3a03d6d00b493a229 2024-12-08T00:19:45,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617245632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617245632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,636 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617245633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617245642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:45,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617245643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/190177e2c3354f6e81321ef8501cc766 is 50, key is test_row_0/C:col10/1733617184749/Put/seqid=0 2024-12-08T00:19:45,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741841_1017 (size=12001) 2024-12-08T00:19:45,800 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,801 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:45,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:45,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:45,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:45,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:45,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:45,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:45,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:45,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/190177e2c3354f6e81321ef8501cc766 2024-12-08T00:19:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a2ccce83506a44f0880725901a10cce4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4 2024-12-08T00:19:46,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T00:19:46,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/34263715e7514fa3a03d6d00b493a229 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229 2024-12-08T00:19:46,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T00:19:46,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/190177e2c3354f6e81321ef8501cc766 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766 2024-12-08T00:19:46,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T00:19:46,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1442ms, sequenceid=14, compaction requested=false 2024-12-08T00:19:46,197 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-08T00:19:46,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:46,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:19:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:46,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:46,213 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:46,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/27cd56b0e84645e682fd1bd7f7c70b20 is 50, key is test_row_0/A:col10/1733617186204/Put/seqid=0 2024-12-08T00:19:46,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617246228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617246234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617246236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617246246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617246247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741842_1018 (size=21365) 2024-12-08T00:19:46,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/27cd56b0e84645e682fd1bd7f7c70b20 2024-12-08T00:19:46,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/25990c73463446d79af966e66908fda8 is 50, key is test_row_0/B:col10/1733617186204/Put/seqid=0 2024-12-08T00:19:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741843_1019 (size=12001) 2024-12-08T00:19:46,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/25990c73463446d79af966e66908fda8 2024-12-08T00:19:46,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617246352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617246355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617246356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617246364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617246363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e83342bcdb7f40609d2ba7934abdf42a is 50, key is test_row_0/C:col10/1733617186204/Put/seqid=0 2024-12-08T00:19:46,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741844_1020 (size=12001) 2024-12-08T00:19:46,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e83342bcdb7f40609d2ba7934abdf42a 2024-12-08T00:19:46,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/27cd56b0e84645e682fd1bd7f7c70b20 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20 2024-12-08T00:19:46,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,444 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20, entries=350, sequenceid=41, filesize=20.9 K 2024-12-08T00:19:46,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/25990c73463446d79af966e66908fda8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8 2024-12-08T00:19:46,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T00:19:46,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e83342bcdb7f40609d2ba7934abdf42a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a 2024-12-08T00:19:46,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T00:19:46,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 291ms, sequenceid=41, compaction requested=false 2024-12-08T00:19:46,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:46,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:46,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-08T00:19:46,598 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:46,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4ebb49454477423c9c9f8d2357344e45 is 50, key is test_row_0/A:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:46,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741845_1021 (size=14341) 2024-12-08T00:19:46,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4ebb49454477423c9c9f8d2357344e45 2024-12-08T00:19:46,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e7c38e344d2f44a0b1a280bb5532a5be is 50, key is test_row_0/B:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:46,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617246662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617246663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617246665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617246667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617246671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741846_1022 (size=12001) 2024-12-08T00:19:46,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e7c38e344d2f44a0b1a280bb5532a5be 2024-12-08T00:19:46,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0e3841fdc2124902a987c8ded69b9812 is 50, key is test_row_0/C:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:46,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617246788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617246791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617246793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617246793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617246806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741847_1023 (size=12001) 2024-12-08T00:19:46,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0e3841fdc2124902a987c8ded69b9812 2024-12-08T00:19:46,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4ebb49454477423c9c9f8d2357344e45 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45 2024-12-08T00:19:46,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:46,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45, entries=200, sequenceid=52, filesize=14.0 K 2024-12-08T00:19:46,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e7c38e344d2f44a0b1a280bb5532a5be as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be 2024-12-08T00:19:46,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T00:19:46,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0e3841fdc2124902a987c8ded69b9812 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812 2024-12-08T00:19:46,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T00:19:46,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:46,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:46,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:46,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 344ms, sequenceid=52, compaction requested=true 2024-12-08T00:19:46,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:46,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:46,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:46,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:46,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:46,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:46,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:46,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:46,929 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:46,929 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:46,934 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 
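A minimal worked check of the selection just logged ("3 files of size 36003"), assuming the default hbase.hstore.compaction.ratio of 1.2: under ExploringCompactionPolicy a store file stays in a candidate set only if it is no larger than the ratio times the combined size of the other candidates, and the three ~11.7 K store-B files from the flushes above (12001 bytes each) all pass that test. This is an editorial sketch, not test code; the file sizes are taken from the log and the ratio is the assumed default.

public class CompactionRatioCheck {
  public static void main(String[] args) {
    long[] files = {12001, 12001, 12001}; // the three ~11.7 K store-B files flushed above
    double ratio = 1.2;                   // hbase.hstore.compaction.ratio default (assumed unchanged)
    long total = 0;
    for (long f : files) total += f;      // 3 x 12001 = 36003 bytes, matching "3 files of size 36003"
    for (long f : files) {
      // ExploringCompactionPolicy keeps a file only if it is <= ratio * (size of the others)
      boolean eligible = f <= ratio * (total - f);
      System.out.println(f + " bytes eligible=" + eligible); // prints true for all three
    }
  }
}

With every file under 1.2 x 24002 bytes, the whole set is selected, which is why the policy reports one permutation considered and one "in ratio".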
2024-12-08T00:19:46,936 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:46,936 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:46,936 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.2 K 2024-12-08T00:19:46,937 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 34263715e7514fa3a03d6d00b493a229, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733617184633 2024-12-08T00:19:46,938 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 25990c73463446d79af966e66908fda8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733617186204 2024-12-08T00:19:46,939 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:46,939 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e7c38e344d2f44a0b1a280bb5532a5be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222 2024-12-08T00:19:46,939 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:46,939 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:46,941 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=46.6 K 2024-12-08T00:19:46,942 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2ccce83506a44f0880725901a10cce4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733617184633 2024-12-08T00:19:46,943 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27cd56b0e84645e682fd1bd7f7c70b20, keycount=350, bloomtype=ROW, size=20.9 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733617184913 2024-12-08T00:19:46,944 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ebb49454477423c9c9f8d2357344e45, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222 2024-12-08T00:19:47,016 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#9 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:47,017 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/0a9061e76bda4f7bb2b81688e43f7c9d is 50, key is test_row_0/A:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:47,022 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#10 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:47,023 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e7ef52cd1e7e4f259459431eae90125e is 50, key is test_row_0/B:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:47,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:47,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:19:47,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:47,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:47,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:47,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741848_1024 (size=12104) 2024-12-08T00:19:47,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:47,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/906f61f5d8ed43428cee8080ab3600d9 is 50, key is test_row_0/A:col10/1733617186661/Put/seqid=0 2024-12-08T00:19:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,087 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e7ef52cd1e7e4f259459431eae90125e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7ef52cd1e7e4f259459431eae90125e 2024-12-08T00:19:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741849_1025 (size=12104) 2024-12-08T00:19:47,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617247050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617247051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617247062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617247072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617247075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,119 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into e7ef52cd1e7e4f259459431eae90125e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
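The "Over memstore limit=512.0 K" rejections interleaved above come from HRegion.checkResources, which refuses writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only illustrates that product; the 128 KB flush size is an assumption (the actual value used by this run is not recorded in the log), while the multiplier of 4 is the HBase default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style settings; only meant to show how a 512 K blocking limit can arise.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 KB (assumption)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // HBase default
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 524288 bytes = 512.0 K, the limit reported in the warnings above.
    System.out.println("blockingMemStoreSize=" + blocking + " bytes");
  }
}

Until a flush brings the memstore back under that product, puts are rejected and the client retries, which appears to be what the repeated Mutate calls with fresh callIds and deadlines reflect.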
2024-12-08T00:19:47,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:47,120 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617186928; duration=0sec 2024-12-08T00:19:47,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:47,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:47,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:47,127 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:47,127 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:47,127 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,128 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.2 K 2024-12-08T00:19:47,129 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 190177e2c3354f6e81321ef8501cc766, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733617184633 2024-12-08T00:19:47,129 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e83342bcdb7f40609d2ba7934abdf42a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733617186204 2024-12-08T00:19:47,130 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e3841fdc2124902a987c8ded69b9812, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222 2024-12-08T00:19:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is 
added to blk_1073741850_1026 (size=14341) 2024-12-08T00:19:47,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/906f61f5d8ed43428cee8080ab3600d9 2024-12-08T00:19:47,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ef141dd317d34df090a18b52c92eb45b is 50, key is test_row_0/B:col10/1733617186661/Put/seqid=0 2024-12-08T00:19:47,191 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#12 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:47,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/03e467cdefc84c2bb49ccdf542c47dfe is 50, key is test_row_0/C:col10/1733617186239/Put/seqid=0 2024-12-08T00:19:47,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617247204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617247207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617247208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617247215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741851_1027 (size=12001) 2024-12-08T00:19:47,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617247215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ef141dd317d34df090a18b52c92eb45b 2024-12-08T00:19:47,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741852_1028 (size=12104) 2024-12-08T00:19:47,263 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/03e467cdefc84c2bb49ccdf542c47dfe as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/03e467cdefc84c2bb49ccdf542c47dfe 2024-12-08T00:19:47,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6b4e2b668da54e6793ef2403aa431c92 is 50, key is test_row_0/C:col10/1733617186661/Put/seqid=0 2024-12-08T00:19:47,282 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 03e467cdefc84c2bb49ccdf542c47dfe(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:47,282 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:47,282 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617186928; duration=0sec 2024-12-08T00:19:47,283 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:47,283 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741853_1029 (size=12001) 2024-12-08T00:19:47,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:47,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:47,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617247431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617247433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617247433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617247433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617247434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,496 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:19:47,521 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/0a9061e76bda4f7bb2b81688e43f7c9d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/0a9061e76bda4f7bb2b81688e43f7c9d 2024-12-08T00:19:47,538 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 0a9061e76bda4f7bb2b81688e43f7c9d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:47,538 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:47,538 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617186919; duration=0sec 2024-12-08T00:19:47,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:47,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:47,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
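The RegionTooBusyException warnings above come from HRegion.checkResources, which rejects writes once the region's memstore exceeds its blocking size, computed as the flush threshold (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit in this run shows the test scales the flush size far below the production default of 128 MB so that flushes and write blocking happen quickly. A minimal sketch of settings that would produce that limit, assuming (the log does not confirm the exact values) a 128 K flush size and a multiplier of 4:

    // Sketch only: test-scale memstore settings; 128 K x 4 = 512 K blocking limit.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush threshold
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // assumed multiplier
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("Blocking memstore limit = " + blocking + " bytes"); // 524288 = 512 K
      }
    }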
2024-12-08T00:19:47,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:47,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:47,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,562 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:19:47,564 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-08T00:19:47,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:47,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:47,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:47,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:47,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
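While the master keeps retrying the flush procedure, the writers behind the RegionTooBusyException rejections above are not failed outright: the HBase client normally treats RegionTooBusyException as retriable, so each rejected Mutate (the callId entries from 172.17.0.2) is paused and resent under the client retry settings. A small sketch of the kind of single-row write this log shows being rejected (row test_row_0, families A/B/C); the retry property names are real, the values are only examples:

    // Sketch only: a writer that relies on client-side retries when the region
    // answers RegionTooBusyException ("Over memstore limit=512.0 K").
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 10); // example retry budget
        conf.setLong("hbase.client.pause", 100);        // example pause between retries, in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          table.put(put); // retried internally until it succeeds or retries are exhausted
        }
      }
    }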
2024-12-08T00:19:47,714 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6b4e2b668da54e6793ef2403aa431c92 2024-12-08T00:19:47,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/906f61f5d8ed43428cee8080ab3600d9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9 2024-12-08T00:19:47,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617247736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9, entries=200, sequenceid=78, filesize=14.0 K 2024-12-08T00:19:47,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ef141dd317d34df090a18b52c92eb45b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b 2024-12-08T00:19:47,761 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617247754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617247754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:19:47,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617247754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617247756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6b4e2b668da54e6793ef2403aa431c92 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92 2024-12-08T00:19:47,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:19:47,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 754ms, sequenceid=78, compaction requested=false 2024-12-08T00:19:47,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:47,851 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:47,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T00:19:47,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:47,852 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:47,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:47,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a84c4b1734af4a1e94767cd7f993710e is 50, key is test_row_0/A:col10/1733617187066/Put/seqid=0 2024-12-08T00:19:47,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741854_1030 (size=12001) 2024-12-08T00:19:47,901 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a84c4b1734af4a1e94767cd7f993710e 2024-12-08T00:19:47,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8db4f0b2f6404cb08d3af10f6fd3c581 is 50, key is test_row_0/B:col10/1733617187066/Put/seqid=0 2024-12-08T00:19:47,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741855_1031 (size=12001) 2024-12-08T00:19:47,949 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8db4f0b2f6404cb08d3af10f6fd3c581 2024-12-08T00:19:47,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/38b8a951fea14cd1a7f50f3e0861c9a6 is 50, key is test_row_0/C:col10/1733617187066/Put/seqid=0 2024-12-08T00:19:48,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741856_1032 (size=12001) 2024-12-08T00:19:48,007 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/38b8a951fea14cd1a7f50f3e0861c9a6 2024-12-08T00:19:48,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a84c4b1734af4a1e94767cd7f993710e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e 2024-12-08T00:19:48,029 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:19:48,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8db4f0b2f6404cb08d3af10f6fd3c581 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581 2024-12-08T00:19:48,047 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:19:48,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/38b8a951fea14cd1a7f50f3e0861c9a6 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6 2024-12-08T00:19:48,073 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:19:48,075 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 223ms, sequenceid=91, compaction requested=true 2024-12-08T00:19:48,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:48,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:48,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-08T00:19:48,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-08T00:19:48,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-08T00:19:48,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3620 sec 2024-12-08T00:19:48,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 3.3880 sec 2024-12-08T00:19:48,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:48,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:48,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:48,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:48,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:48,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:48,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:48,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:48,298 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/33f5fe8267cb4457bf076730ceb4a7c4 is 50, key is test_row_0/A:col10/1733617188285/Put/seqid=0 2024-12-08T00:19:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741857_1033 (size=19021) 2024-12-08T00:19:48,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/33f5fe8267cb4457bf076730ceb4a7c4 2024-12-08T00:19:48,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/80f1338eaa424c63a5d6d69c432f0583 is 50, key is test_row_0/B:col10/1733617188285/Put/seqid=0 2024-12-08T00:19:48,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741858_1034 (size=12001) 2024-12-08T00:19:48,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617248390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617248392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617248405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617248405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617248406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617248508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617248511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617248517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617248519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617248518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617248712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617248717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617248726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617248726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:48,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617248729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:48,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/80f1338eaa424c63a5d6d69c432f0583 2024-12-08T00:19:48,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4ed70870fd3741f6b7be77672e3a7661 is 50, key is test_row_0/C:col10/1733617188285/Put/seqid=0 2024-12-08T00:19:48,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741859_1035 (size=12001) 2024-12-08T00:19:48,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4ed70870fd3741f6b7be77672e3a7661 2024-12-08T00:19:48,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T00:19:48,836 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-08T00:19:48,839 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-08T00:19:48,843 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:48,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T00:19:48,845 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:48,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:48,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/33f5fe8267cb4457bf076730ceb4a7c4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4 2024-12-08T00:19:48,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4, entries=300, sequenceid=102, filesize=18.6 K 2024-12-08T00:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/80f1338eaa424c63a5d6d69c432f0583 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583 2024-12-08T00:19:48,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583, entries=150, sequenceid=102, filesize=11.7 K 2024-12-08T00:19:48,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4ed70870fd3741f6b7be77672e3a7661 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661 2024-12-08T00:19:48,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661, entries=150, sequenceid=102, filesize=11.7 K 2024-12-08T00:19:48,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 612ms, sequenceid=102, compaction requested=true 2024-12-08T00:19:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce:
2024-12-08T00:19:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1
2024-12-08T00:19:48,900 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-08T00:19:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:19:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2
2024-12-08T00:19:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-08T00:19:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3
2024-12-08T00:19:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-08T00:19:48,901 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-08T00:19:48,903 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57467 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-08T00:19:48,904 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files)
2024-12-08T00:19:48,904 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.
2024-12-08T00:19:48,904 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/0a9061e76bda4f7bb2b81688e43f7c9d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=56.1 K
2024-12-08T00:19:48,904 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-08T00:19:48,905 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files)
2024-12-08T00:19:48,905 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.
2024-12-08T00:19:48,905 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7ef52cd1e7e4f259459431eae90125e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=47.0 K
2024-12-08T00:19:48,905 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a9061e76bda4f7bb2b81688e43f7c9d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222
2024-12-08T00:19:48,906 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e7ef52cd1e7e4f259459431eae90125e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222
2024-12-08T00:19:48,906 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 906f61f5d8ed43428cee8080ab3600d9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617186644
2024-12-08T00:19:48,906 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ef141dd317d34df090a18b52c92eb45b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617186644
2024-12-08T00:19:48,907 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a84c4b1734af4a1e94767cd7f993710e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617187035
2024-12-08T00:19:48,907 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8db4f0b2f6404cb08d3af10f6fd3c581, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617187035
2024-12-08T00:19:48,908 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33f5fe8267cb4457bf076730ceb4a7c4, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733617188244
2024-12-08T00:19:48,908 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 80f1338eaa424c63a5d6d69c432f0583, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733617188276
2024-12-08T00:19:48,935 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#21 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-08T00:19:48,935 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#22 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T00:19:48,936 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ee57da1dcc434ae5bc26b95a4e2bdeb2 is 50, key is test_row_0/B:col10/1733617188285/Put/seqid=0
2024-12-08T00:19:48,936 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a46dcd7e950e43d9992bad05849d0163 is 50, key is test_row_0/A:col10/1733617188285/Put/seqid=0
2024-12-08T00:19:48,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-08T00:19:48,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741860_1036 (size=12241)
2024-12-08T00:19:48,997 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a46dcd7e950e43d9992bad05849d0163 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a46dcd7e950e43d9992bad05849d0163
2024-12-08T00:19:48,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:19:49,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-08T00:19:49,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.
2024-12-08T00:19:49,002 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C
2024-12-08T00:19:49,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:19:49,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741861_1037 (size=12241)
2024-12-08T00:19:49,025 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into a46dcd7e950e43d9992bad05849d0163(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:19:49,026 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce:
2024-12-08T00:19:49,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/93290e03f0ae4b45aae4a8ead8e14b01 is 50, key is test_row_0/A:col10/1733617188400/Put/seqid=0
2024-12-08T00:19:49,032 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=12, startTime=1733617188900; duration=0sec
2024-12-08T00:19:49,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.
as already flushing 2024-12-08T00:19:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:49,033 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:49,033 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:49,034 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:19:49,038 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:19:49,038 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:49,039 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:49,039 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/03e467cdefc84c2bb49ccdf542c47dfe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=47.0 K 2024-12-08T00:19:49,040 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03e467cdefc84c2bb49ccdf542c47dfe, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617186222 2024-12-08T00:19:49,041 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b4e2b668da54e6793ef2403aa431c92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617186644 2024-12-08T00:19:49,042 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b8a951fea14cd1a7f50f3e0861c9a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617187035 2024-12-08T00:19:49,043 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ed70870fd3741f6b7be77672e3a7661, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, 
earliestPutTs=1733617188276 2024-12-08T00:19:49,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617249048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617249049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617249052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741862_1038 (size=12001) 2024-12-08T00:19:49,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,064 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/93290e03f0ae4b45aae4a8ead8e14b01 2024-12-08T00:19:49,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617249053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617249055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,077 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#24 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:49,080 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/b37427017efa4c43ab8eb81797cb59c4 is 50, key is test_row_0/C:col10/1733617188285/Put/seqid=0 2024-12-08T00:19:49,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e8cb97cd06d34b2bab3134bad0347626 is 50, key is test_row_0/B:col10/1733617188400/Put/seqid=0 2024-12-08T00:19:49,087 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T00:19:49,088 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T00:19:49,091 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-08T00:19:49,091 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-08T00:19:49,092 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:19:49,092 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T00:19:49,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:19:49,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T00:19:49,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-08T00:19:49,094 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-08T00:19:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741863_1039 (size=12001) 2024-12-08T00:19:49,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741864_1040 (size=12241) 2024-12-08T00:19:49,132 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/b37427017efa4c43ab8eb81797cb59c4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b37427017efa4c43ab8eb81797cb59c4 2024-12-08T00:19:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T00:19:49,147 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into b37427017efa4c43ab8eb81797cb59c4(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:49,147 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:49,148 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=12, startTime=1733617188901; duration=0sec 2024-12-08T00:19:49,148 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:49,148 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:49,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617249157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617249158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617249164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617249166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617249168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617249363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617249364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617249367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617249372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617249374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,430 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ee57da1dcc434ae5bc26b95a4e2bdeb2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ee57da1dcc434ae5bc26b95a4e2bdeb2 2024-12-08T00:19:49,446 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into ee57da1dcc434ae5bc26b95a4e2bdeb2(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
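The repeated RegionTooBusyException entries above are raised by HRegion.checkResources() (visible at the top of each stack trace) whenever a region's memstore is over its blocking limit, which is the memstore flush size multiplied by the block multiplier; the 512.0 K limit seen here reflects the deliberately small memstore settings this test appears to use rather than production defaults. A minimal sketch, assuming the standard hbase-site.xml property names and their stock default values, of how that blocking limit is derived:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Standard HBase property names with their stock defaults (128 MB flush size, multiplier 4);
        // the test behind this log evidently overrides them to end up at a 512 KB blocking limit.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("Mutations are rejected with RegionTooBusyException once the region's"
            + " memstore exceeds " + blockingLimit + " bytes, until a flush brings it back down.");
    }
}

With the stock defaults the limit works out to 512 MB per region; shrinking it, as this test does, makes the memstore fill and push back on writers almost immediately, which is exactly the back-pressure the WARN/DEBUG pairs above record.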
2024-12-08T00:19:49,446 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:49,446 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=12, startTime=1733617188901; duration=0sec 2024-12-08T00:19:49,446 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:49,446 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:49,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T00:19:49,505 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e8cb97cd06d34b2bab3134bad0347626 2024-12-08T00:19:49,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/66b0803397924580960c1f80670039fe is 50, key is test_row_0/C:col10/1733617188400/Put/seqid=0 2024-12-08T00:19:49,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741865_1041 (size=12001) 2024-12-08T00:19:49,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617249669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617249670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617249675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617249678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617249680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:49,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T00:19:49,961 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/66b0803397924580960c1f80670039fe 2024-12-08T00:19:49,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/93290e03f0ae4b45aae4a8ead8e14b01 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01 2024-12-08T00:19:50,018 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01, entries=150, sequenceid=127, filesize=11.7 K 2024-12-08T00:19:50,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e8cb97cd06d34b2bab3134bad0347626 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626 2024-12-08T00:19:50,037 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626, entries=150, sequenceid=127, filesize=11.7 K 2024-12-08T00:19:50,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/66b0803397924580960c1f80670039fe as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe 2024-12-08T00:19:50,055 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe, entries=150, sequenceid=127, filesize=11.7 K 2024-12-08T00:19:50,057 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1055ms, sequenceid=127, compaction requested=false 2024-12-08T00:19:50,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:50,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
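The flush that just completed for f51bdc360ee4fbe2f9447c9b6b4bf1ce ran as region-level subprocedure pid=15 of the table-level FlushTableProcedure pid=14, which finishes in the entries that follow; the recurring MasterRpcServices "Checking to see if procedure is done pid=14" lines are consistent with a client polling the master for that procedure's result. A minimal sketch, assuming the standard HBase Admin API, of how such a table flush is requested from client code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; in recent HBase versions this
            // is carried out by a FlushTableProcedure like pid=14 in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

Flushing moves each store's memstore contents into new HFiles (the .tmp/A, .tmp/B and .tmp/C files being committed above), which is also what releases the memstore back-pressure behind the RegionTooBusyException entries.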
2024-12-08T00:19:50,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-08T00:19:50,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-08T00:19:50,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-08T00:19:50,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2150 sec 2024-12-08T00:19:50,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.2260 sec 2024-12-08T00:19:50,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:50,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:50,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:50,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/2d9c19bcbf204979882daa2807f28543 is 50, key is test_row_0/A:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:50,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741866_1042 (size=12151) 2024-12-08T00:19:50,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617250341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617250341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617250341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617250344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617250344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617250453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617250453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617250455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617250456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617250462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/2d9c19bcbf204979882daa2807f28543 2024-12-08T00:19:50,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/d43df4e6c79a4fefa00016178a8e2edd is 50, key is test_row_0/B:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:50,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741867_1043 (size=12151) 2024-12-08T00:19:50,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617250661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617250663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617250665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617250669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617250670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T00:19:50,952 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-08T00:19:50,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:50,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-08T00:19:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T00:19:50,958 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:50,959 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:50,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:50,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617250963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617250970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617250971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:50,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617250976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:50,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617250976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/d43df4e6c79a4fefa00016178a8e2edd 2024-12-08T00:19:51,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T00:19:51,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9cc72c7b3a444e189f335f8dfd973e7d is 50, key is test_row_0/C:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:51,111 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T00:19:51,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:51,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
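For context on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once a region's memstore passes its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of that relationship, assuming a 128 KiB flush size and the default multiplier of 4 (which would yield the 512.0 K limit seen here; the test's actual settings are not shown in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values, chosen so the product matches the 512.0 K limit in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking multiplier

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Puts are rejected with RegionTooBusyException while the memstore exceeds this
    // limit, until a flush (like the ones in this log) brings it back down.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}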
2024-12-08T00:19:51,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741868_1044 (size=12151) 2024-12-08T00:19:51,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9cc72c7b3a444e189f335f8dfd973e7d 2024-12-08T00:19:51,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/2d9c19bcbf204979882daa2807f28543 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543 2024-12-08T00:19:51,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543, entries=150, sequenceid=144, filesize=11.9 K 2024-12-08T00:19:51,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/d43df4e6c79a4fefa00016178a8e2edd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd 2024-12-08T00:19:51,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd, entries=150, sequenceid=144, filesize=11.9 K 2024-12-08T00:19:51,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9cc72c7b3a444e189f335f8dfd973e7d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d 2024-12-08T00:19:51,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d, entries=150, sequenceid=144, filesize=11.9 K 2024-12-08T00:19:51,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1000ms, sequenceid=144, compaction requested=true 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:19:51,181 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:51,181 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:51,183 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:51,183 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor 
compaction (all files) 2024-12-08T00:19:51,183 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,183 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a46dcd7e950e43d9992bad05849d0163, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.5 K 2024-12-08T00:19:51,184 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:51,184 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:51,184 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:51,184 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ee57da1dcc434ae5bc26b95a4e2bdeb2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.5 K 2024-12-08T00:19:51,185 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a46dcd7e950e43d9992bad05849d0163, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733617188276 2024-12-08T00:19:51,185 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ee57da1dcc434ae5bc26b95a4e2bdeb2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733617188276 2024-12-08T00:19:51,185 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93290e03f0ae4b45aae4a8ead8e14b01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733617188325 2024-12-08T00:19:51,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e8cb97cd06d34b2bab3134bad0347626, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733617188325 2024-12-08T00:19:51,186 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d9c19bcbf204979882daa2807f28543, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:51,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d43df4e6c79a4fefa00016178a8e2edd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:51,207 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:51,208 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/c9235e68ccd647b0b8fdab6476adebe9 is 50, key is test_row_0/A:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:51,216 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:51,217 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/024fd7ca317f41b78c386ae470eb7aac is 50, key is test_row_0/B:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:51,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T00:19:51,267 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T00:19:51,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,268 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:51,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741869_1045 (size=12493) 2024-12-08T00:19:51,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fb6534abf1af4a84b99b61d6856ccb8f is 50, key is test_row_0/A:col10/1733617190339/Put/seqid=0 2024-12-08T00:19:51,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741870_1046 (size=12493) 
2024-12-08T00:19:51,307 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/024fd7ca317f41b78c386ae470eb7aac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/024fd7ca317f41b78c386ae470eb7aac 2024-12-08T00:19:51,318 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 024fd7ca317f41b78c386ae470eb7aac(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:51,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:51,318 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617191180; duration=0sec 2024-12-08T00:19:51,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:51,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:51,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:51,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:51,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:51,321 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
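The interleaved "-shortCompactions-0" and "-longCompactions-0" thread names above are the region server's two compaction pools, and the CompactSplit status lines report their queue depths (compactionQueue=(longCompactions=...:shortCompactions=...)). A minimal sketch of the pool-size settings those threads come from; the values below are illustrative defaults, not necessarily what this test run used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThreadPools {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The small ("short") pool handles compactions under the throttle size,
    // the large ("long") pool handles the bigger ones.
    conf.setInt("hbase.regionserver.thread.compaction.small", 1);
    conf.setInt("hbase.regionserver.thread.compaction.large", 1);
    System.out.println("short pool threads = "
        + conf.getInt("hbase.regionserver.thread.compaction.small", 1)
        + ", long pool threads = "
        + conf.getInt("hbase.regionserver.thread.compaction.large", 1));
  }
}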
2024-12-08T00:19:51,321 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b37427017efa4c43ab8eb81797cb59c4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.5 K 2024-12-08T00:19:51,321 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b37427017efa4c43ab8eb81797cb59c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733617188276 2024-12-08T00:19:51,322 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 66b0803397924580960c1f80670039fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733617188325 2024-12-08T00:19:51,323 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cc72c7b3a444e189f335f8dfd973e7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741871_1047 (size=12151) 2024-12-08T00:19:51,326 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fb6534abf1af4a84b99b61d6856ccb8f 2024-12-08T00:19:51,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e816c33803f04d6a8728460ae604ebfd is 50, key is test_row_0/B:col10/1733617190339/Put/seqid=0 2024-12-08T00:19:51,350 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:51,351 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/f65b042ddb434df2a6bd18f246702af5 is 50, key is test_row_0/C:col10/1733617190176/Put/seqid=0 2024-12-08T00:19:51,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741872_1048 (size=12151) 2024-12-08T00:19:51,386 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e816c33803f04d6a8728460ae604ebfd 2024-12-08T00:19:51,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741873_1049 (size=12493) 2024-12-08T00:19:51,412 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/f65b042ddb434df2a6bd18f246702af5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f65b042ddb434df2a6bd18f246702af5 2024-12-08T00:19:51,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9bb8d29d4a7e4bdc81782d10323bde68 is 50, key is test_row_0/C:col10/1733617190339/Put/seqid=0 2024-12-08T00:19:51,425 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into f65b042ddb434df2a6bd18f246702af5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
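In each store above, three roughly 12 K files (totalSize=35.5 K, i.e. 36393 bytes) compact down to a single ~12.2 K file, presumably because the writers keep updating the same small set of rows, so older cells are dropped during the merge. These compactions are system-requested by the flusher; a roughly equivalent manual request against one column family would look like this sketch (the table and family name "A" come from the log, the rest is standard client boilerplate):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to compact the "A" family of the test table;
      // the server-side selection policy still decides which files to merge.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
    }
  }
}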
2024-12-08T00:19:51,426 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:51,426 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617191180; duration=0sec 2024-12-08T00:19:51,426 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:51,426 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:51,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741874_1050 (size=12151) 2024-12-08T00:19:51,455 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9bb8d29d4a7e4bdc81782d10323bde68 2024-12-08T00:19:51,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fb6534abf1af4a84b99b61d6856ccb8f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f 2024-12-08T00:19:51,473 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:19:51,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
as already flushing 2024-12-08T00:19:51,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:51,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e816c33803f04d6a8728460ae604ebfd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd 2024-12-08T00:19:51,485 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:19:51,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/9bb8d29d4a7e4bdc81782d10323bde68 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68 2024-12-08T00:19:51,498 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:19:51,499 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=53.67 KB/54960 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 231ms, sequenceid=166, compaction requested=false 2024-12-08T00:19:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
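pid=17 here is the per-region FlushRegionProcedure spawned by the FlushTableProcedure (pid=16) that the jenkins client requested earlier with "flush TestAcidGuarantees"; after the first attempt was rejected because the region was already flushing, the retried call performs the flush and both procedures complete below. A minimal sketch of issuing that kind of flush from client code (the connection bootstrap is assumed, it is not part of this log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Triggers a FlushTableProcedure on the master, which dispatches a
      // FlushRegionProcedure to each region server hosting the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}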
2024-12-08T00:19:51,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-08T00:19:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-08T00:19:51,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-08T00:19:51,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 542 msec 2024-12-08T00:19:51,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:51,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:19:51,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:51,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:51,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:51,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:51,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 549 msec 2024-12-08T00:19:51,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/67be1088180440c68f2c4cc737509903 is 50, key is test_row_0/A:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:51,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741875_1051 (size=16931) 2024-12-08T00:19:51,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617251537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617251539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617251548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617251549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617251549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T00:19:51,563 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-08T00:19:51,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:51,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-08T00:19:51,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T00:19:51,569 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:51,570 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:51,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:51,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617251650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617251651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617251655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617251656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617251656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T00:19:51,704 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/c9235e68ccd647b0b8fdab6476adebe9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c9235e68ccd647b0b8fdab6476adebe9 2024-12-08T00:19:51,716 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into c9235e68ccd647b0b8fdab6476adebe9(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:51,716 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:51,717 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617191180; duration=0sec 2024-12-08T00:19:51,717 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:51,717 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:51,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:51,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:51,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:51,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:51,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617251855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617251855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617251860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617251862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617251862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T00:19:51,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:51,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:51,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:51,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:51,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/67be1088180440c68f2c4cc737509903 2024-12-08T00:19:51,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b517753406944e03ae20dd274b521c97 is 50, key is test_row_0/B:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:51,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741876_1052 (size=12151) 2024-12-08T00:19:51,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b517753406944e03ae20dd274b521c97 2024-12-08T00:19:52,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/68286d3cfb694c90b6063d905b2c5e21 is 50, key is test_row_0/C:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:52,032 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:52,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:52,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:52,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741877_1053 (size=12151) 2024-12-08T00:19:52,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/68286d3cfb694c90b6063d905b2c5e21 2024-12-08T00:19:52,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/67be1088180440c68f2c4cc737509903 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903 2024-12-08T00:19:52,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903, entries=250, sequenceid=181, filesize=16.5 K 2024-12-08T00:19:52,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b517753406944e03ae20dd274b521c97 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97 2024-12-08T00:19:52,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97, entries=150, sequenceid=181, filesize=11.9 K 2024-12-08T00:19:52,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/68286d3cfb694c90b6063d905b2c5e21 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21 2024-12-08T00:19:52,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21, entries=150, sequenceid=181, filesize=11.9 K 2024-12-08T00:19:52,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 600ms, sequenceid=181, compaction requested=true 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction 
store size is 1 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:52,106 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:52,106 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:52,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:52,108 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:52,108 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:52,108 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:52,109 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/024fd7ca317f41b78c386ae470eb7aac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.9 K 2024-12-08T00:19:52,109 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:52,109 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:52,109 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,109 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c9235e68ccd647b0b8fdab6476adebe9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=40.6 K 2024-12-08T00:19:52,110 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9235e68ccd647b0b8fdab6476adebe9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:52,110 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 024fd7ca317f41b78c386ae470eb7aac, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:52,111 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb6534abf1af4a84b99b61d6856ccb8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617190279 2024-12-08T00:19:52,111 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e816c33803f04d6a8728460ae604ebfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617190279 2024-12-08T00:19:52,112 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 67be1088180440c68f2c4cc737509903, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:52,112 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b517753406944e03ae20dd274b521c97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:52,132 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:52,133 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4fcdb696a5c04d1484053e5cf90c77c2 is 50, key is test_row_0/A:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:52,134 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:52,135 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5638ff936e1c4a6199753c2cbb1b815c is 50, key is test_row_0/B:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:52,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741878_1054 (size=12595) 2024-12-08T00:19:52,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:52,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T00:19:52,174 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5638ff936e1c4a6199753c2cbb1b815c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5638ff936e1c4a6199753c2cbb1b815c 2024-12-08T00:19:52,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38fda6f06e754de4844907e9b1b8d450 is 50, key is test_row_0/A:col10/1733617191545/Put/seqid=0 2024-12-08T00:19:52,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617252177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,185 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 5638ff936e1c4a6199753c2cbb1b815c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:52,185 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,185 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617192106; duration=0sec 2024-12-08T00:19:52,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:52,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:52,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:52,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:52,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:52,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,188 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:52,188 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:52,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,188 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:52,188 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f65b042ddb434df2a6bd18f246702af5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=35.9 K 2024-12-08T00:19:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:52,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741879_1055 (size=12595) 2024-12-08T00:19:52,190 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f65b042ddb434df2a6bd18f246702af5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733617189050 2024-12-08T00:19:52,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,190 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bb8d29d4a7e4bdc81782d10323bde68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617190279 2024-12-08T00:19:52,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 68286d3cfb694c90b6063d905b2c5e21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:52,200 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4fcdb696a5c04d1484053e5cf90c77c2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4fcdb696a5c04d1484053e5cf90c77c2 2024-12-08T00:19:52,210 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of 
f51bdc360ee4fbe2f9447c9b6b4bf1ce into 4fcdb696a5c04d1484053e5cf90c77c2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:52,210 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,210 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617192106; duration=0sec 2024-12-08T00:19:52,210 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:52,210 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:52,231 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:52,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0b7f11a226c748769542081466054dbe is 50, key is test_row_0/C:col10/1733617191500/Put/seqid=0 2024-12-08T00:19:52,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741880_1056 (size=16931) 2024-12-08T00:19:52,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38fda6f06e754de4844907e9b1b8d450 2024-12-08T00:19:52,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741881_1057 (size=12595) 2024-12-08T00:19:52,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6d7edaba62404c239b64232790464ad3 is 50, key is test_row_0/B:col10/1733617191545/Put/seqid=0 2024-12-08T00:19:52,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617252287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741882_1058 (size=12151) 2024-12-08T00:19:52,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6d7edaba62404c239b64232790464ad3 2024-12-08T00:19:52,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d1d4b8a11777466a941914cf59357873 is 50, key is test_row_0/C:col10/1733617191545/Put/seqid=0 2024-12-08T00:19:52,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:52,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:52,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741883_1059 (size=12151) 2024-12-08T00:19:52,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d1d4b8a11777466a941914cf59357873 2024-12-08T00:19:52,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38fda6f06e754de4844907e9b1b8d450 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450 2024-12-08T00:19:52,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450, entries=250, sequenceid=208, filesize=16.5 K 2024-12-08T00:19:52,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6d7edaba62404c239b64232790464ad3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3 2024-12-08T00:19:52,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3, entries=150, sequenceid=208, filesize=11.9 K 2024-12-08T00:19:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d1d4b8a11777466a941914cf59357873 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873 2024-12-08T00:19:52,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873, entries=150, sequenceid=208, filesize=11.9 K 2024-12-08T00:19:52,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 251ms, sequenceid=208, compaction requested=false 2024-12-08T00:19:52,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T00:19:52,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:52,497 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:19:52,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:52,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:52,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:52,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:52,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
as already flushing 2024-12-08T00:19:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3ce62155b8fa4017b67e85bf5d34d30d is 50, key is test_row_0/A:col10/1733617192179/Put/seqid=0 2024-12-08T00:19:52,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741884_1060 (size=14541) 2024-12-08T00:19:52,536 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3ce62155b8fa4017b67e85bf5d34d30d 2024-12-08T00:19:52,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/2c07171306f746f4a3ebf590c38d25ec is 50, key is test_row_0/B:col10/1733617192179/Put/seqid=0 2024-12-08T00:19:52,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741885_1061 (size=12151) 2024-12-08T00:19:52,561 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/2c07171306f746f4a3ebf590c38d25ec 2024-12-08T00:19:52,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617252565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6199ad78b6c9435990cdeb693b878c92 is 50, key is test_row_0/C:col10/1733617192179/Put/seqid=0 2024-12-08T00:19:52,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741886_1062 (size=12151) 2024-12-08T00:19:52,591 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6199ad78b6c9435990cdeb693b878c92 2024-12-08T00:19:52,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3ce62155b8fa4017b67e85bf5d34d30d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d 2024-12-08T00:19:52,606 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d, entries=200, sequenceid=221, filesize=14.2 K 2024-12-08T00:19:52,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/2c07171306f746f4a3ebf590c38d25ec as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec 2024-12-08T00:19:52,617 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec, entries=150, sequenceid=221, filesize=11.9 K 2024-12-08T00:19:52,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6199ad78b6c9435990cdeb693b878c92 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92 2024-12-08T00:19:52,627 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92, entries=150, sequenceid=221, filesize=11.9 K 2024-12-08T00:19:52,630 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 133ms, sequenceid=221, compaction requested=true 2024-12-08T00:19:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-08T00:19:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-08T00:19:52,635 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-08T00:19:52,635 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0610 sec 2024-12-08T00:19:52,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.0710 sec 2024-12-08T00:19:52,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:52,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:52,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T00:19:52,674 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-08T00:19:52,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:52,679 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:52,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-08T00:19:52,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:52,683 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:52,683 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0b7f11a226c748769542081466054dbe as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0b7f11a226c748769542081466054dbe 2024-12-08T00:19:52,684 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:52,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:52,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617252679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e6930d40c316458e96a1eb28692e6e7b is 50, key is test_row_0/A:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:52,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,695 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 0b7f11a226c748769542081466054dbe(size=12.3 K), total size for store is 36.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:52,695 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:52,695 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617192106; duration=0sec 2024-12-08T00:19:52,695 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:52,695 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:52,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741887_1063 (size=16931) 2024-12-08T00:19:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:52,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617252791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:52,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:52,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:52,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:52,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:52,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:52,995 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617252994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617252996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617252996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:52,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:52,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:52,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:52,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617252999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617253000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e6930d40c316458e96a1eb28692e6e7b 2024-12-08T00:19:53,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/c65afcf7ebb6456c9b70028fd26b0bf1 is 50, key is test_row_0/B:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:53,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:53,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:53,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:53,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741888_1064 (size=12151) 2024-12-08T00:19:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:53,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617253299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617253299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617253299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617253304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617253305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,310 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:53,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
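Writers hitting these RegionTooBusyException rejections are expected to back off and retry once the in-flight flush frees memstore space; the HBase client normally does this internally, but the sketch below makes the backoff explicit. It assumes only the standard client API (ConnectionFactory, Table, Put); the table and row names come from the log, while the attempt count and sleep times are arbitrary:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {

                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (IOException e) {
                        // Depending on client retry settings the busy-region error may
                        // surface directly or wrapped, so walk the cause chain.
                        boolean regionBusy = false;
                        for (Throwable t = e; t != null; t = t.getCause()) {
                            if (t instanceof RegionTooBusyException) {
                                regionBusy = true;
                                break;
                            }
                        }
                        if (!regionBusy || attempt >= 5) {
                            throw e; // not a busy-region error, or out of attempts
                        }
                        Thread.sleep(200L * attempt); // back off while the flush drains the memstore
                    }
                }
            }
        }
    }
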
2024-12-08T00:19:53,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:53,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:53,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,464 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:53,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
as already flushing 2024-12-08T00:19:53,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/c65afcf7ebb6456c9b70028fd26b0bf1 2024-12-08T00:19:53,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/05574ef9617549da989183ef24de0bf8 is 50, key is test_row_0/C:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:53,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741889_1065 (size=12151) 2024-12-08T00:19:53,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/05574ef9617549da989183ef24de0bf8 2024-12-08T00:19:53,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:53,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:53,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:53,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:53,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
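The repeating pid=21 failures follow a simple pattern: the master keeps re-dispatching the flush procedure, and the region server rejects each attempt with "NOT flushing ... as already flushing" because a flush is still in progress, surfacing the rejection as the IOException above. A minimal illustration of that guard pattern, not HRegion's actual code:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Only one flush can run per region at a time; a request arriving while one is
    // in progress is rejected (and later re-dispatched) rather than queued.
    class FlushGuardSketch {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        void requestFlush(String regionName) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // Mirrors the log: the callable fails and the master retries the
                // procedure until an attempt can actually start a flush.
                throw new IOException("Unable to complete flush " + regionName);
            }
            try {
                doFlush(regionName);
            } finally {
                flushing.set(false);
            }
        }

        private void doFlush(String regionName) {
            // snapshot the memstore, write .tmp HFiles, commit them (elided)
        }
    }
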
2024-12-08T00:19:53,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e6930d40c316458e96a1eb28692e6e7b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b 2024-12-08T00:19:53,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b, entries=250, sequenceid=249, filesize=16.5 K 2024-12-08T00:19:53,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/c65afcf7ebb6456c9b70028fd26b0bf1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1 2024-12-08T00:19:53,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1, entries=150, sequenceid=249, filesize=11.9 K 2024-12-08T00:19:53,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/05574ef9617549da989183ef24de0bf8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8 2024-12-08T00:19:53,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8, entries=150, sequenceid=249, filesize=11.9 K 2024-12-08T00:19:53,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1013ms, sequenceid=249, compaction requested=true 2024-12-08T00:19:53,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:53,686 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:19:53,688 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 60998 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:19:53,688 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:53,688 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,688 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4fcdb696a5c04d1484053e5cf90c77c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=59.6 K 2024-12-08T00:19:53,689 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fcdb696a5c04d1484053e5cf90c77c2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:53,690 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38fda6f06e754de4844907e9b1b8d450, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733617191545 2024-12-08T00:19:53,691 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ce62155b8fa4017b67e85bf5d34d30d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733617192176 2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:53,692 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6930d40c316458e96a1eb28692e6e7b, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192530 2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:53,692 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 
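The ExploringCompactionPolicy messages ("selected 4 files ... after considering 3 permutations with 3 in ratio") refer to a ratio test over candidate windows of store files. A simplified sketch of that test, assuming the usual interpretation that no file may exceed the ratio times the combined size of the other candidates; this is an illustration of the idea, not the actual policy implementation:

    import java.util.List;

    class CompactionRatioSketch {
        // A candidate window is "in ratio" when no single file dwarfs the rest.
        static boolean inRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                // Each file must be no larger than ratio x (sum of the other files).
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the A-store selection above: ~12.3 K, 16.5 K, 14.2 K and 16.5 K files.
            List<Long> sizes = List.of(12_300L, 16_500L, 14_200L, 16_500L);
            System.out.println(inRatio(sizes, 1.2)); // true: the files are similarly sized
        }
    }
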
2024-12-08T00:19:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:53,695 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:19:53,695 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:53,695 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,696 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5638ff936e1c4a6199753c2cbb1b815c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=47.9 K 2024-12-08T00:19:53,697 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5638ff936e1c4a6199753c2cbb1b815c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:53,698 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d7edaba62404c239b64232790464ad3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733617191545 2024-12-08T00:19:53,698 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c07171306f746f4a3ebf590c38d25ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733617192176 2024-12-08T00:19:53,699 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c65afcf7ebb6456c9b70028fd26b0bf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192552 2024-12-08T00:19:53,728 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:53,729 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6fce2c86f35c495a8d98df092f937f32 is 50, key is test_row_0/B:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:53,730 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#51 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:53,732 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/66ce41fe24bd407fb9a5898663514c9f is 50, key is test_row_0/A:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:53,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741890_1066 (size=12731) 2024-12-08T00:19:53,765 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6fce2c86f35c495a8d98df092f937f32 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fce2c86f35c495a8d98df092f937f32 2024-12-08T00:19:53,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741891_1067 (size=12731) 2024-12-08T00:19:53,776 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 6fce2c86f35c495a8d98df092f937f32(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
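The "Committing .tmp/... as ..." entries show the flusher and compactor writing new HFiles under the region's .tmp directory and only then moving them into the column-family directory. A minimal sketch of that write-then-rename commit using the Hadoop FileSystem API, with hypothetical paths modelled on the ones in the log:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            // fs.defaultFS would point at the test cluster, e.g. hdfs://localhost:46183.
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);

            Path regionDir = new Path("/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce");
            Path tmpFile = new Path(regionDir, ".tmp/B/c65afcf7ebb6456c9b70028fd26b0bf1");
            Path storeFile = new Path(regionDir, "B/c65afcf7ebb6456c9b70028fd26b0bf1");

            // The rename is a single metadata operation on HDFS, so readers never see
            // a partially written HFile in the column-family directory.
            if (!fs.rename(tmpFile, storeFile)) {
                throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
            }
        }
    }
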
2024-12-08T00:19:53,776 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:53,777 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=12, startTime=1733617193692; duration=0sec 2024-12-08T00:19:53,777 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:53,777 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:53,777 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:19:53,780 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T00:19:53,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,782 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:53,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:53,783 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:19:53,783 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:53,784 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:53,784 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0b7f11a226c748769542081466054dbe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=47.9 K 2024-12-08T00:19:53,785 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b7f11a226c748769542081466054dbe, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733617191486 2024-12-08T00:19:53,786 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d4b8a11777466a941914cf59357873, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733617191545 2024-12-08T00:19:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:53,788 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6199ad78b6c9435990cdeb693b878c92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733617192176 2024-12-08T00:19:53,790 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 05574ef9617549da989183ef24de0bf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192552 2024-12-08T00:19:53,792 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/66ce41fe24bd407fb9a5898663514c9f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/66ce41fe24bd407fb9a5898663514c9f 2024-12-08T00:19:53,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38d5c2aac3cf4d66bb46a5560754c4be is 50, key is 
test_row_0/A:col10/1733617192675/Put/seqid=0 2024-12-08T00:19:53,803 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 66ce41fe24bd407fb9a5898663514c9f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:53,803 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:53,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=12, startTime=1733617193685; duration=0sec 2024-12-08T00:19:53,806 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:53,806 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:53,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:53,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:53,818 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:53,819 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/73b1c1bd3da54031b59f9aa5734d7f48 is 50, key is test_row_0/C:col10/1733617192670/Put/seqid=0 2024-12-08T00:19:53,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741892_1068 (size=12151) 2024-12-08T00:19:53,836 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38d5c2aac3cf4d66bb46a5560754c4be 2024-12-08T00:19:53,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3f1d76e9ef494fbebe5358e008a5bd34 is 50, key is test_row_0/B:col10/1733617192675/Put/seqid=0 2024-12-08T00:19:53,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617253849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617253852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617253852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617253853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617253854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741893_1069 (size=12731) 2024-12-08T00:19:53,896 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/73b1c1bd3da54031b59f9aa5734d7f48 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/73b1c1bd3da54031b59f9aa5734d7f48 2024-12-08T00:19:53,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741894_1070 (size=12151) 2024-12-08T00:19:53,906 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 73b1c1bd3da54031b59f9aa5734d7f48(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
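The repeated RegionTooBusyException: Over memstore limit=512.0 K warnings above come from HRegion.checkResources rejecting writes while the region's memstore sits over its blocking threshold; in this run the limit has evidently been tuned down to 512 KB, and in stock HBase it is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier. A minimal, hypothetical Java sketch of a writer that backs off and retries on this condition follows. The table, row, family and qualifier names are taken from the keys logged here; the value, retry count and backoff intervals are illustrative assumptions, and the HBase client may surface this exception wrapped in its own retry exception rather than directly as shown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Client-side retry knobs (assumed values); the server-side blocking limit
            // is what produces the RegionTooBusyException seen in the log.
            conf.setInt("hbase.client.retries.number", 10);
            conf.setLong("hbase.client.pause", 100);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Region is over its memstore blocking limit; wait for the
                        // in-flight flush to drain it, then retry with a longer pause.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }
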
2024-12-08T00:19:53,906 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:53,906 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=12, startTime=1733617193692; duration=0sec 2024-12-08T00:19:53,906 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:53,906 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:53,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617253958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617253962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617253962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617253963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:53,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:53,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617253964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617254161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617254165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617254167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617254168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617254171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,305 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3f1d76e9ef494fbebe5358e008a5bd34 2024-12-08T00:19:54,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/2856c69bad0b40388efa3f9f535536d7 is 50, key is test_row_0/C:col10/1733617192675/Put/seqid=0 2024-12-08T00:19:54,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741895_1071 (size=12151) 2024-12-08T00:19:54,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617254467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617254471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617254471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617254472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617254476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,747 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/2856c69bad0b40388efa3f9f535536d7 2024-12-08T00:19:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/38d5c2aac3cf4d66bb46a5560754c4be as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be 2024-12-08T00:19:54,775 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be, entries=150, sequenceid=259, filesize=11.9 K 2024-12-08T00:19:54,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3f1d76e9ef494fbebe5358e008a5bd34 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34 2024-12-08T00:19:54,784 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34, entries=150, sequenceid=259, filesize=11.9 K 2024-12-08T00:19:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/2856c69bad0b40388efa3f9f535536d7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7 2024-12-08T00:19:54,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:54,798 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7, entries=150, sequenceid=259, filesize=11.9 K 2024-12-08T00:19:54,799 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1018ms, sequenceid=259, compaction requested=false 2024-12-08T00:19:54,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:54,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
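The flush finishing above (FlushRegionProcedure pid=21 under FlushTableProcedure pid=20) writes each store's memstore to a temporary hfile under .tmp/ and then commits it into the A, B and C store directories before the compaction threads later merge those files. Flushes and compactions like these can also be requested administratively; the sketch below is a hypothetical illustration using the standard Admin API, not code from the test, with only the table name taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompact {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                // Flush every region of the table; on the region server this appears
                // as the DefaultStoreFlusher writing .tmp hfiles and committing them.
                admin.flush(tn);
                // Optionally rewrite the accumulated hfiles into one file per store,
                // as the shortCompactions/longCompactions threads do in this log.
                admin.majorCompact(tn);
            }
        }
    }
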
2024-12-08T00:19:54,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-08T00:19:54,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-08T00:19:54,804 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-08T00:19:54,804 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1180 sec 2024-12-08T00:19:54,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.1250 sec 2024-12-08T00:19:54,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-08T00:19:54,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:54,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:54,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:54,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:54,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:54,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:54,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:54,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617254977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617254979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617254980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617254981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:54,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617254982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:54,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a9edcfb5e3e04c009e7cd960c9bf6a3b is 50, key is test_row_0/A:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741896_1072 (size=17181) 2024-12-08T00:19:55,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a9edcfb5e3e04c009e7cd960c9bf6a3b 2024-12-08T00:19:55,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bc4420dc7542420c9c42a493a7d2245a is 50, key is test_row_0/B:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741897_1073 (size=12301) 2024-12-08T00:19:55,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bc4420dc7542420c9c42a493a7d2245a 2024-12-08T00:19:55,086 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617255085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617255088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617255090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617255090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4c7581889ec34f7cbbb6c432e2cc434c is 50, key is test_row_0/C:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741898_1074 (size=12301) 2024-12-08T00:19:55,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4c7581889ec34f7cbbb6c432e2cc434c 2024-12-08T00:19:55,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/a9edcfb5e3e04c009e7cd960c9bf6a3b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b 2024-12-08T00:19:55,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b, entries=250, sequenceid=292, filesize=16.8 K 2024-12-08T00:19:55,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bc4420dc7542420c9c42a493a7d2245a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a 
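The WARN and DEBUG records above show the RPC handlers rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 drains the A and B stores of f51bdc360ee4fbe2f9447c9b6b4bf1ce. As an illustrative aside (not part of the captured log), a minimal sketch of how a writer against this table could back off and retry such rejected puts is shown below; the row, family, and qualifier mirror the test rows seen in the log (test_row_0, A:col10), while the class name, retry budget, and backoff values are assumptions. The stock HBase client will typically retry these rejections itself, subject to hbase.client.retries.number.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class; not taken from the test or from the log above.
public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                              // assumed initial backoff
      for (int attempt = 1; attempt <= 5; attempt++) {    // assumed retry budget
        try {
          table.put(put);        // may be rejected while the memstore is over its limit
          break;                 // write accepted
        } catch (RegionTooBusyException busy) {
          // Same condition as the "Over memstore limit=512.0 K" WARNs above:
          // give the flusher time to drain the memstore, then try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;        // exponential backoff (assumption)
        }
      }
    }
  }
}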
2024-12-08T00:19:55,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T00:19:55,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/4c7581889ec34f7cbbb6c432e2cc434c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c 2024-12-08T00:19:55,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T00:19:55,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 187ms, sequenceid=292, compaction requested=true 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:55,165 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:55,165 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:55,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:55,167 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42063 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:55,167 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:55,167 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:55,167 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:55,167 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:55,167 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:55,167 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/66ce41fe24bd407fb9a5898663514c9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=41.1 K 2024-12-08T00:19:55,167 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fce2c86f35c495a8d98df092f937f32, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.3 K 2024-12-08T00:19:55,168 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66ce41fe24bd407fb9a5898663514c9f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192552 2024-12-08T00:19:55,168 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fce2c86f35c495a8d98df092f937f32, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192552 2024-12-08T00:19:55,168 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f1d76e9ef494fbebe5358e008a5bd34, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=259, earliestPutTs=1733617192675 2024-12-08T00:19:55,168 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38d5c2aac3cf4d66bb46a5560754c4be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733617192675 2024-12-08T00:19:55,169 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9edcfb5e3e04c009e7cd960c9bf6a3b, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193849 2024-12-08T00:19:55,169 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bc4420dc7542420c9c42a493a7d2245a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193852 2024-12-08T00:19:55,192 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:55,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5e918bcb3fc249d8812417ddcc83aaa4 is 50, key is test_row_0/B:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,196 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:55,197 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/02af179e9cd246c19f3749cd3edf85ae is 50, key is test_row_0/A:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741899_1075 (size=12983) 2024-12-08T00:19:55,236 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5e918bcb3fc249d8812417ddcc83aaa4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5e918bcb3fc249d8812417ddcc83aaa4 2024-12-08T00:19:55,247 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 5e918bcb3fc249d8812417ddcc83aaa4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
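The compaction records above come from the selection and throttling machinery: SortedCompactionPolicy/ExploringCompactionPolicy picked 3 eligible files per store (with 16 as the blocking store-file count) and PressureAwareThroughputController capped the rewrite at 50.00 MB/second, while the earlier WARNs stem from the 512.0 K blocking memstore limit. As a hedged illustration (again, not part of the log), the sketch below lists standard HBase configuration keys that control those thresholds; the key names are standard, but the tiny values are assumptions chosen to reproduce a 512 K blocking limit and 3-file minor compactions like this test's, not values read from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper; illustrates the knobs behind the flush/compaction behaviour logged above.
public class SmallMemstoreTuningSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore at 128 K; block new writes at flush size x multiplier,
    // i.e. 512 K here -- the limit checkResources() reports in the WARNs above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Ask for a minor compaction once a store holds 3 files ("3 eligible" above)
    // and block flushes when it reaches 16 ("16 blocking").
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}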
2024-12-08T00:19:55,247 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:55,247 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617195165; duration=0sec 2024-12-08T00:19:55,247 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:55,247 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:55,248 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:55,249 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:55,250 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:55,250 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:55,250 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/73b1c1bd3da54031b59f9aa5734d7f48, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.3 K 2024-12-08T00:19:55,251 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 73b1c1bd3da54031b59f9aa5734d7f48, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733617192552 2024-12-08T00:19:55,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2856c69bad0b40388efa3f9f535536d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733617192675 2024-12-08T00:19:55,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c7581889ec34f7cbbb6c432e2cc434c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193852 2024-12-08T00:19:55,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 
is added to blk_1073741900_1076 (size=12983) 2024-12-08T00:19:55,267 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:55,268 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/433891a92b18436091211bdd1d7f3c9b is 50, key is test_row_0/C:col10/1733617194975/Put/seqid=0 2024-12-08T00:19:55,279 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/02af179e9cd246c19f3749cd3edf85ae as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/02af179e9cd246c19f3749cd3edf85ae 2024-12-08T00:19:55,288 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 02af179e9cd246c19f3749cd3edf85ae(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:55,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:55,289 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617195165; duration=0sec 2024-12-08T00:19:55,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:55,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:55,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:55,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:55,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741901_1077 (size=12983) 2024-12-08T00:19:55,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/96350ff26757466db694d8a36703c034 is 50, key is test_row_0/A:col10/1733617195300/Put/seqid=0 2024-12-08T00:19:55,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/433891a92b18436091211bdd1d7f3c9b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/433891a92b18436091211bdd1d7f3c9b 2024-12-08T00:19:55,329 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 433891a92b18436091211bdd1d7f3c9b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:55,329 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:55,329 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617195165; duration=0sec 2024-12-08T00:19:55,330 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:55,330 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:55,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741902_1078 (size=19621) 2024-12-08T00:19:55,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617255343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617255345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617255346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617255346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/96350ff26757466db694d8a36703c034 2024-12-08T00:19:55,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/54323b62a2d9426383eff9ae52cf8310 is 50, key is test_row_0/B:col10/1733617195300/Put/seqid=0 2024-12-08T00:19:55,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741903_1079 (size=12301) 2024-12-08T00:19:55,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617255448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617255452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617255454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617255454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617255653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617255655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617255658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617255658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/54323b62a2d9426383eff9ae52cf8310 2024-12-08T00:19:55,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/3e9c9457683a40aa9042f216885f8b13 is 50, key is test_row_0/C:col10/1733617195300/Put/seqid=0 2024-12-08T00:19:55,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741904_1080 (size=12301) 2024-12-08T00:19:55,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/3e9c9457683a40aa9042f216885f8b13 2024-12-08T00:19:55,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/96350ff26757466db694d8a36703c034 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034 2024-12-08T00:19:55,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034, entries=300, sequenceid=305, filesize=19.2 K 2024-12-08T00:19:55,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/54323b62a2d9426383eff9ae52cf8310 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310 2024-12-08T00:19:55,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310, entries=150, sequenceid=305, filesize=12.0 K 2024-12-08T00:19:55,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/3e9c9457683a40aa9042f216885f8b13 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13 2024-12-08T00:19:55,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13, entries=150, sequenceid=305, filesize=12.0 K 2024-12-08T00:19:55,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 577ms, sequenceid=305, compaction requested=false 2024-12-08T00:19:55,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:55,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:19:55,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:55,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:55,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:55,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:55,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fc54cc31fcc94a849392b16d44f09485 is 50, key is test_row_0/A:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:55,974 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617255967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617255973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617255975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617255975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741905_1081 (size=12301) 2024-12-08T00:19:55,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fc54cc31fcc94a849392b16d44f09485 2024-12-08T00:19:55,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:55,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617255991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3493666867664a77aa8e78ef535cd066 is 50, key is test_row_0/B:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:56,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741906_1082 (size=12301) 2024-12-08T00:19:56,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3493666867664a77aa8e78ef535cd066 2024-12-08T00:19:56,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33ebae3e9de04ed6b8862f3015d1f62a is 50, key is test_row_0/C:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:56,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741907_1083 (size=12301) 2024-12-08T00:19:56,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33ebae3e9de04ed6b8862f3015d1f62a 2024-12-08T00:19:56,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/fc54cc31fcc94a849392b16d44f09485 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485 2024-12-08T00:19:56,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485, entries=150, sequenceid=332, filesize=12.0 K 
2024-12-08T00:19:56,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/3493666867664a77aa8e78ef535cd066 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066 2024-12-08T00:19:56,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617256078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617256080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617256080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617256080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066, entries=150, sequenceid=332, filesize=12.0 K 2024-12-08T00:19:56,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33ebae3e9de04ed6b8862f3015d1f62a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a 2024-12-08T00:19:56,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a, entries=150, sequenceid=332, filesize=12.0 K 2024-12-08T00:19:56,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 140ms, sequenceid=332, compaction requested=true 2024-12-08T00:19:56,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:56,102 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:56,102 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:56,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:56,103 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44905 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:56,103 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:56,103 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:56,103 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:56,103 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/02af179e9cd246c19f3749cd3edf85ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=43.9 K 2024-12-08T00:19:56,103 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:56,103 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:56,104 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5e918bcb3fc249d8812417ddcc83aaa4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.7 K 2024-12-08T00:19:56,104 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02af179e9cd246c19f3749cd3edf85ae, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193852 2024-12-08T00:19:56,104 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e918bcb3fc249d8812417ddcc83aaa4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193852 2024-12-08T00:19:56,104 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96350ff26757466db694d8a36703c034, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733617194980 2024-12-08T00:19:56,105 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 54323b62a2d9426383eff9ae52cf8310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733617194980 2024-12-08T00:19:56,105 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc54cc31fcc94a849392b16d44f09485, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:56,106 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3493666867664a77aa8e78ef535cd066, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:56,122 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:56,123 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/28adb39d75bd4e3db3d9960ac8945480 is 50, key is test_row_0/A:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:56,134 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:56,134 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e9aaea17185e42e4bb389afd901d12b1 is 50, key is test_row_0/B:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:56,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741908_1084 (size=13085) 2024-12-08T00:19:56,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741909_1085 (size=13085) 2024-12-08T00:19:56,185 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/e9aaea17185e42e4bb389afd901d12b1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e9aaea17185e42e4bb389afd901d12b1 2024-12-08T00:19:56,192 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into e9aaea17185e42e4bb389afd901d12b1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:56,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:56,192 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617196102; duration=0sec 2024-12-08T00:19:56,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:56,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:56,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:56,195 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:56,195 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:56,196 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:56,196 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/433891a92b18436091211bdd1d7f3c9b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.7 K 2024-12-08T00:19:56,196 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 433891a92b18436091211bdd1d7f3c9b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733617193852 2024-12-08T00:19:56,197 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e9c9457683a40aa9042f216885f8b13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733617194980 2024-12-08T00:19:56,198 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 33ebae3e9de04ed6b8862f3015d1f62a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:56,225 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#71 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:56,226 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6c1c4bd6a60547a9864c4515dc49baa9 is 50, key is test_row_0/C:col10/1733617195959/Put/seqid=0 2024-12-08T00:19:56,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741910_1086 (size=13085) 2024-12-08T00:19:56,255 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/6c1c4bd6a60547a9864c4515dc49baa9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6c1c4bd6a60547a9864c4515dc49baa9 2024-12-08T00:19:56,263 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 6c1c4bd6a60547a9864c4515dc49baa9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:56,264 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:56,264 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617196102; duration=0sec 2024-12-08T00:19:56,266 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:56,266 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:56,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:56,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:56,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/8cd98ebeb19b44fab912e8a0a098b984 is 50, key is test_row_0/A:col10/1733617196283/Put/seqid=0 2024-12-08T00:19:56,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617256318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617256319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617256320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617256321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741911_1087 (size=12301) 2024-12-08T00:19:56,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/8cd98ebeb19b44fab912e8a0a098b984 2024-12-08T00:19:56,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8530760c6ecf43f085838dff0e2e304e is 50, key is test_row_0/B:col10/1733617196283/Put/seqid=0 2024-12-08T00:19:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741912_1088 (size=12301) 2024-12-08T00:19:56,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617256424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617256424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617256425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617256426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,576 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/28adb39d75bd4e3db3d9960ac8945480 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/28adb39d75bd4e3db3d9960ac8945480 2024-12-08T00:19:56,588 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 28adb39d75bd4e3db3d9960ac8945480(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:56,589 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:56,589 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617196101; duration=0sec 2024-12-08T00:19:56,589 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:56,589 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:56,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617256628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617256629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617256630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617256631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8530760c6ecf43f085838dff0e2e304e 2024-12-08T00:19:56,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T00:19:56,789 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-08T00:19:56,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:56,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-08T00:19:56,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:56,793 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:56,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5fee7ba809d5448bb2a6fce7c8f8a9bd is 50, key is test_row_0/C:col10/1733617196283/Put/seqid=0 2024-12-08T00:19:56,794 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-12-08T00:19:56,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:56,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741913_1089 (size=12301) 2024-12-08T00:19:56,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5fee7ba809d5448bb2a6fce7c8f8a9bd 2024-12-08T00:19:56,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/8cd98ebeb19b44fab912e8a0a098b984 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984 2024-12-08T00:19:56,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984, entries=150, sequenceid=345, filesize=12.0 K 2024-12-08T00:19:56,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/8530760c6ecf43f085838dff0e2e304e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e 2024-12-08T00:19:56,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e, entries=150, sequenceid=345, filesize=12.0 K 2024-12-08T00:19:56,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5fee7ba809d5448bb2a6fce7c8f8a9bd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd 2024-12-08T00:19:56,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd, entries=150, sequenceid=345, filesize=12.0 K 2024-12-08T00:19:56,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 558ms, sequenceid=345, compaction requested=false 2024-12-08T00:19:56,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:56,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:56,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/7228392654814d52a611c1f3fc840986 is 50, key is test_row_0/A:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:56,947 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T00:19:56,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:56,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:56,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:56,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617256942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617256960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617256960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:56,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617256961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:56,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741914_1090 (size=12301) 2024-12-08T00:19:56,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/7228392654814d52a611c1f3fc840986 2024-12-08T00:19:56,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/da5e01ef9ad74ce98741b1dc8f54cc7e is 50, key is test_row_0/B:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:57,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741915_1091 (size=12301) 2024-12-08T00:19:57,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/da5e01ef9ad74ce98741b1dc8f54cc7e 2024-12-08T00:19:57,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/ffab165b63f44afe8199ca47a303a6d6 is 50, key is test_row_0/C:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741916_1092 (size=12301) 2024-12-08T00:19:57,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617257062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617257065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617257067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617257068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:57,100 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T00:19:57,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:57,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:57,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T00:19:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617257266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617257268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617257270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617257273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:57,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T00:19:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:19:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:19:57,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/ffab165b63f44afe8199ca47a303a6d6 2024-12-08T00:19:57,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/7228392654814d52a611c1f3fc840986 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986 2024-12-08T00:19:57,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:19:57,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/da5e01ef9ad74ce98741b1dc8f54cc7e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e 2024-12-08T00:19:57,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:19:57,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/ffab165b63f44afe8199ca47a303a6d6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6 2024-12-08T00:19:57,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:19:57,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 527ms, sequenceid=373, compaction requested=true 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:57,463 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:57,463 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:57,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:57,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:57,464 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:57,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:57,464 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:57,464 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,464 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:57,465 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/28adb39d75bd4e3db3d9960ac8945480, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.8 K 2024-12-08T00:19:57,465 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e9aaea17185e42e4bb389afd901d12b1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.8 K 2024-12-08T00:19:57,465 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e9aaea17185e42e4bb389afd901d12b1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:57,466 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28adb39d75bd4e3db3d9960ac8945480, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:57,466 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8530760c6ecf43f085838dff0e2e304e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733617195967 2024-12-08T00:19:57,467 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting da5e01ef9ad74ce98741b1dc8f54cc7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:57,467 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cd98ebeb19b44fab912e8a0a098b984, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733617195967 2024-12-08T00:19:57,469 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7228392654814d52a611c1f3fc840986, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:57,480 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:57,481 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ff6e7b067a954e819c06700d201e2bf3 is 50, key is test_row_0/B:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:57,490 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:57,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/9d84716e837a4ea990ebc01244fdfc12 is 50, key is test_row_0/A:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741917_1093 (size=13187) 2024-12-08T00:19:57,515 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/ff6e7b067a954e819c06700d201e2bf3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ff6e7b067a954e819c06700d201e2bf3 2024-12-08T00:19:57,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741918_1094 (size=13187) 2024-12-08T00:19:57,527 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/9d84716e837a4ea990ebc01244fdfc12 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9d84716e837a4ea990ebc01244fdfc12 2024-12-08T00:19:57,528 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into ff6e7b067a954e819c06700d201e2bf3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:57,528 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:57,528 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617197463; duration=0sec 2024-12-08T00:19:57,528 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:57,528 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:57,528 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:57,531 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:57,531 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:57,531 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:57,531 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6c1c4bd6a60547a9864c4515dc49baa9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.8 K 2024-12-08T00:19:57,532 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c1c4bd6a60547a9864c4515dc49baa9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733617195339 2024-12-08T00:19:57,533 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fee7ba809d5448bb2a6fce7c8f8a9bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1733617195967 2024-12-08T00:19:57,535 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ffab165b63f44afe8199ca47a303a6d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:57,538 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 9d84716e837a4ea990ebc01244fdfc12(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:57,538 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:57,538 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617197463; duration=0sec 2024-12-08T00:19:57,538 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:57,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:57,549 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:57,551 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/3eeb460a8b6a4849afa19787848da2c6 is 50, key is test_row_0/C:col10/1733617196933/Put/seqid=0 2024-12-08T00:19:57,564 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T00:19:57,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:57,565 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:57,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:57,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:57,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:57,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/72260f70a227487faf798be4720d71c9 is 50, key is test_row_0/A:col10/1733617196941/Put/seqid=0 2024-12-08T00:19:57,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741919_1095 (size=13187) 2024-12-08T00:19:57,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741920_1096 (size=12301) 2024-12-08T00:19:57,584 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/72260f70a227487faf798be4720d71c9 2024-12-08T00:19:57,588 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/3eeb460a8b6a4849afa19787848da2c6 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3eeb460a8b6a4849afa19787848da2c6 2024-12-08T00:19:57,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/efcca6f3d2074cb9ac2c28db5f5d7989 is 50, key is test_row_0/B:col10/1733617196941/Put/seqid=0 2024-12-08T00:19:57,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741921_1097 (size=12301) 2024-12-08T00:19:57,602 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/efcca6f3d2074cb9ac2c28db5f5d7989 2024-12-08T00:19:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5958e86bb12d42eb8f5019d032f1ade1 is 50, key is test_row_0/C:col10/1733617196941/Put/seqid=0 2024-12-08T00:19:57,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741922_1098 (size=12301) 2024-12-08T00:19:57,626 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 3eeb460a8b6a4849afa19787848da2c6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:19:57,626 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:57,626 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617197463; duration=0sec 2024-12-08T00:19:57,627 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:57,627 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:57,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617257683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617257685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617257687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617257687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617257789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617257789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617257793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617257792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:57,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617257992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:57,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617257994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617257997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617257999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617258003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,006 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:19:58,019 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5958e86bb12d42eb8f5019d032f1ade1 2024-12-08T00:19:58,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/72260f70a227487faf798be4720d71c9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9 2024-12-08T00:19:58,034 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9, entries=150, sequenceid=385, filesize=12.0 K 2024-12-08T00:19:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/efcca6f3d2074cb9ac2c28db5f5d7989 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989 2024-12-08T00:19:58,047 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989, entries=150, sequenceid=385, filesize=12.0 K 2024-12-08T00:19:58,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5958e86bb12d42eb8f5019d032f1ade1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1 2024-12-08T00:19:58,056 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1, entries=150, sequenceid=385, filesize=12.0 K 2024-12-08T00:19:58,057 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 492ms, sequenceid=385, compaction requested=false 2024-12-08T00:19:58,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:58,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
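The client-side trace earlier in this section shows the AcidGuaranteesTestTool writer getting a RegionTooBusyException out of HTable.put once the region blocks updates at its memstore limit while flushes like the one above catch up. A minimal sketch of how a caller might absorb that error with its own backoff and retry; the row, qualifier, and value mirror names seen in the log, but the retry bounds and backoff are illustrative assumptions, not taken from the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BlockedPutRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A" and qualifier "col10" appear in the log; the value is made up.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                          // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                            // may surface RegionTooBusyException
          break;                                     // write accepted
        } catch (RegionTooBusyException e) {
          // The region is blocking updates until its memstore drains; wait and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```

The HBase client already retries internally, so an explicit loop like this only matters once those retries are exhausted, which is how the exception reached doAnAction in the trace above.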
2024-12-08T00:19:58,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-08T00:19:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-08T00:19:58,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-08T00:19:58,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2650 sec 2024-12-08T00:19:58,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2720 sec 2024-12-08T00:19:58,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:58,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-08T00:19:58,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:58,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:58,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:58,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/68b9eb5136d44b618618266bfaba04f8 is 50, key is test_row_0/A:col10/1733617198297/Put/seqid=0 2024-12-08T00:19:58,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741923_1099 (size=12301) 2024-12-08T00:19:58,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/68b9eb5136d44b618618266bfaba04f8 2024-12-08T00:19:58,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bde716422687429d996a1469e0dc538a is 50, key is test_row_0/B:col10/1733617198297/Put/seqid=0 2024-12-08T00:19:58,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741924_1100 
(size=12301) 2024-12-08T00:19:58,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bde716422687429d996a1469e0dc538a 2024-12-08T00:19:58,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617258365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617258365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617258367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617258369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33fd8157b04b41e4aac271a721f25bfd is 50, key is test_row_0/C:col10/1733617198297/Put/seqid=0 2024-12-08T00:19:58,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741925_1101 (size=12301) 2024-12-08T00:19:58,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33fd8157b04b41e4aac271a721f25bfd 2024-12-08T00:19:58,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/68b9eb5136d44b618618266bfaba04f8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8 2024-12-08T00:19:58,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T00:19:58,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/bde716422687429d996a1469e0dc538a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a 
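The RegionTooBusyException warnings repeated above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking size, which in stock HBase is the memstore flush size multiplied by a block multiplier. The 512.0 K limit in the log reflects the deliberately small values this test runs with; the exact settings are not visible here, so the numbers below are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder values: blocking size = flush size x block multiplier.
    // 128 K x 4 would give the 512 K limit reported in the log, but the
    // test's real settings are not shown here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
  }
}
```

Raising either value trades more memory pressure on the region server for fewer blocked writes.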
2024-12-08T00:19:58,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T00:19:58,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/33fd8157b04b41e4aac271a721f25bfd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd 2024-12-08T00:19:58,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617258474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617258475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617258479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617258479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T00:19:58,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 187ms, sequenceid=414, compaction requested=true 2024-12-08T00:19:58,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:58,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:19:58,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:58,486 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:58,487 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:19:58,489 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:58,489 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:19:58,489 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 
f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:19:58,489 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:19:58,489 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:58,489 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:58,490 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ff6e7b067a954e819c06700d201e2bf3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.9 K 2024-12-08T00:19:58,490 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9d84716e837a4ea990ebc01244fdfc12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=36.9 K 2024-12-08T00:19:58,490 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d84716e837a4ea990ebc01244fdfc12, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:58,490 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ff6e7b067a954e819c06700d201e2bf3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:58,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72260f70a227487faf798be4720d71c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733617196937 2024-12-08T00:19:58,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting efcca6f3d2074cb9ac2c28db5f5d7989, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733617196937 2024-12-08T00:19:58,491 DEBUG 
[RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68b9eb5136d44b618618266bfaba04f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733617197677 2024-12-08T00:19:58,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bde716422687429d996a1469e0dc538a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733617197677 2024-12-08T00:19:58,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:19:58,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:58,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:58,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:19:58,517 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#87 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:58,518 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/2ce7122e951f4aca96653673353b66eb is 50, key is test_row_0/A:col10/1733617198297/Put/seqid=0 2024-12-08T00:19:58,524 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:58,524 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/201fec840e7a41c09f3ddcebeb6fd2e4 is 50, key is test_row_0/B:col10/1733617198297/Put/seqid=0 2024-12-08T00:19:58,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741926_1102 (size=13289) 2024-12-08T00:19:58,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741927_1103 (size=13289) 2024-12-08T00:19:58,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:58,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:58,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:58,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/bba088cde4fd422bbb1f622ef3dff888 is 50, key is test_row_0/A:col10/1733617198323/Put/seqid=0 2024-12-08T00:19:58,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741928_1104 (size=12301) 2024-12-08T00:19:58,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/bba088cde4fd422bbb1f622ef3dff888 2024-12-08T00:19:58,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617258756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/44a2cc0eae884d93906dba425efb5846 is 50, key is test_row_0/B:col10/1733617198323/Put/seqid=0 2024-12-08T00:19:58,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617258770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617258774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617258785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741929_1105 (size=12301) 2024-12-08T00:19:58,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/44a2cc0eae884d93906dba425efb5846 2024-12-08T00:19:58,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1604de5eb43c41bba57be8d23847ba67 is 50, key is test_row_0/C:col10/1733617198323/Put/seqid=0 2024-12-08T00:19:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741930_1106 (size=12301) 2024-12-08T00:19:58,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1604de5eb43c41bba57be8d23847ba67 2024-12-08T00:19:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/bba088cde4fd422bbb1f622ef3dff888 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888 2024-12-08T00:19:58,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617258874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617258883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888, entries=150, sequenceid=425, filesize=12.0 K 2024-12-08T00:19:58,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617258889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/44a2cc0eae884d93906dba425efb5846 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846 2024-12-08T00:19:58,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:58,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617258893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:58,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T00:19:58,899 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-08T00:19:58,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:19:58,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-08T00:19:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T00:19:58,903 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:19:58,904 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:19:58,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:19:58,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846, entries=150, sequenceid=425, filesize=12.0 K 2024-12-08T00:19:58,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1604de5eb43c41bba57be8d23847ba67 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67 2024-12-08T00:19:58,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67, entries=150, sequenceid=425, filesize=12.0 K 2024-12-08T00:19:58,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 237ms, sequenceid=425, compaction requested=true 2024-12-08T00:19:58,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:19:58,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-08T00:19:58,970 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/2ce7122e951f4aca96653673353b66eb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2ce7122e951f4aca96653673353b66eb 2024-12-08T00:19:58,977 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 2ce7122e951f4aca96653673353b66eb(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:58,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:58,977 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617198486; duration=0sec 2024-12-08T00:19:58,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-08T00:19:58,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:58,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:58,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:19:58,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:19:58,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:19:58,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:19:58,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3eeb460a8b6a4849afa19787848da2c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=48.9 K 2024-12-08T00:19:58,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3eeb460a8b6a4849afa19787848da2c6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617196320 2024-12-08T00:19:58,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5958e86bb12d42eb8f5019d032f1ade1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1733617196937 2024-12-08T00:19:58,981 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33fd8157b04b41e4aac271a721f25bfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733617197677 2024-12-08T00:19:58,982 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1604de5eb43c41bba57be8d23847ba67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733617198323 2024-12-08T00:19:58,985 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/201fec840e7a41c09f3ddcebeb6fd2e4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/201fec840e7a41c09f3ddcebeb6fd2e4 2024-12-08T00:19:58,991 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 201fec840e7a41c09f3ddcebeb6fd2e4(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:58,991 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:58,991 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617198486; duration=0sec 2024-12-08T00:19:58,992 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-08T00:19:58,992 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:58,992 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:58,992 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-08T00:19:58,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:19:58,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:19:58,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. because compaction request was cancelled 2024-12-08T00:19:58,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:58,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T00:19:58,995 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:19:58,995 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:19:58,995 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
because compaction request was cancelled 2024-12-08T00:19:58,995 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:19:58,995 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T00:19:58,996 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:19:58,996 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:19:58,996 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. because compaction request was cancelled 2024-12-08T00:19:58,996 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:19:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T00:19:59,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#92 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:19:59,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/028745e00a8c4d3b93d6ad4f179f36bc is 50, key is test_row_0/C:col10/1733617198323/Put/seqid=0 2024-12-08T00:19:59,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741931_1107 (size=13323) 2024-12-08T00:19:59,045 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/028745e00a8c4d3b93d6ad4f179f36bc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/028745e00a8c4d3b93d6ad4f179f36bc 2024-12-08T00:19:59,053 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 028745e00a8c4d3b93d6ad4f179f36bc(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:19:59,053 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:59,053 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=12, startTime=1733617198931; duration=0sec 2024-12-08T00:19:59,053 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:19:59,053 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:19:59,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-08T00:19:59,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:59,068 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:19:59,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/dd2c1df64d38483095b81c45b60a62dd is 50, key is test_row_0/A:col10/1733617198768/Put/seqid=0 2024-12-08T00:19:59,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:59,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:19:59,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617259109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617259115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617259115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617259116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741932_1108 (size=12301) 2024-12-08T00:19:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T00:19:59,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617259220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617259221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617259221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617259225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617259427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617259428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617259429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617259429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T00:19:59,527 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/dd2c1df64d38483095b81c45b60a62dd 2024-12-08T00:19:59,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f10674e608904e2cbff33c3483ba8a43 is 50, key is test_row_0/B:col10/1733617198768/Put/seqid=0 2024-12-08T00:19:59,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741933_1109 (size=12301) 2024-12-08T00:19:59,619 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f10674e608904e2cbff33c3483ba8a43 2024-12-08T00:19:59,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/dccfbc395d1b47ebbcd5e9353f505cb6 is 50, key is test_row_0/C:col10/1733617198768/Put/seqid=0 2024-12-08T00:19:59,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to 
blk_1073741934_1110 (size=12301) 2024-12-08T00:19:59,679 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/dccfbc395d1b47ebbcd5e9353f505cb6 2024-12-08T00:19:59,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/dd2c1df64d38483095b81c45b60a62dd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd 2024-12-08T00:19:59,710 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd, entries=150, sequenceid=454, filesize=12.0 K 2024-12-08T00:19:59,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f10674e608904e2cbff33c3483ba8a43 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43 2024-12-08T00:19:59,718 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43, entries=150, sequenceid=454, filesize=12.0 K 2024-12-08T00:19:59,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/dccfbc395d1b47ebbcd5e9353f505cb6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6 2024-12-08T00:19:59,729 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6, entries=150, sequenceid=454, filesize=12.0 K 2024-12-08T00:19:59,731 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 
f51bdc360ee4fbe2f9447c9b6b4bf1ce in 663ms, sequenceid=454, compaction requested=true 2024-12-08T00:19:59,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:19:59,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:19:59,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-08T00:19:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-08T00:19:59,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-08T00:19:59,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 829 msec 2024-12-08T00:19:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:19:59,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:19:59,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:19:59,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:19:59,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:19:59,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:19:59,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 836 msec 2024-12-08T00:19:59,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/6297d1924d4e4a3ebdc3d45c9452fa7c is 50, key is test_row_0/A:col10/1733617199114/Put/seqid=0 2024-12-08T00:19:59,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741935_1111 (size=14741) 2024-12-08T00:19:59,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=465 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/6297d1924d4e4a3ebdc3d45c9452fa7c 2024-12-08T00:19:59,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617259827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617259829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b9af2301230c43ab9fde757001e2c137 is 50, key is test_row_0/B:col10/1733617199114/Put/seqid=0 2024-12-08T00:19:59,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617259842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617259827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741936_1112 (size=12301) 2024-12-08T00:19:59,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b9af2301230c43ab9fde757001e2c137 2024-12-08T00:19:59,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e25ad532713b40819f1c846fe0e6d975 is 50, key is test_row_0/C:col10/1733617199114/Put/seqid=0 2024-12-08T00:19:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617259946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617259946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617259948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:19:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617259957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:19:59,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741937_1113 (size=12301) 2024-12-08T00:20:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T00:20:00,007 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-08T00:20:00,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:00,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-08T00:20:00,035 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:00,036 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:00,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:00,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:00,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617260154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617260156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617260153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617260164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T00:20:00,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:00,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
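Editor's note on the repeated RegionTooBusyException entries above: the region is blocking writes at HRegion.checkResources because its memstore has exceeded the 512.0 K blocking limit reported in the log while a flush is still draining it. As a rough sketch only (these values are hypothetical and not read from this test's configuration), the blocking limit is typically the per-region flush size multiplied by the block multiplier; the numbers below were chosen solely so the product matches the 512 K figure seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: 128 K flush size * 4 block multiplier = 512 K,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("region memstore blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
    }
}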
2024-12-08T00:20:00,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:00,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T00:20:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
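Editor's note: the FlushTableProcedure (pid=26) and its FlushRegionCallable subprocedure (pid=27) above were started by a client-requested flush of TestAcidGuarantees ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the region server rejects the callable with "Unable to complete flush ... as already flushing", the master records the remote procedure failure, and the callable is re-dispatched. A minimal sketch of issuing such a flush through the Java Admin API, assuming a reachable cluster and the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a table flush on the master; the per-region flush may be rejected
            // with "already flushing" (as in the log above) and retried by the procedure framework.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}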
2024-12-08T00:20:00,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e25ad532713b40819f1c846fe0e6d975 2024-12-08T00:20:00,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/6297d1924d4e4a3ebdc3d45c9452fa7c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c 2024-12-08T00:20:00,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c, entries=200, sequenceid=465, filesize=14.4 K 2024-12-08T00:20:00,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b9af2301230c43ab9fde757001e2c137 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137 2024-12-08T00:20:00,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137, entries=150, sequenceid=465, filesize=12.0 K 2024-12-08T00:20:00,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/e25ad532713b40819f1c846fe0e6d975 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975 2024-12-08T00:20:00,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975, entries=150, sequenceid=465, filesize=12.0 K 2024-12-08T00:20:00,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 696ms, sequenceid=465, compaction requested=true 2024-12-08T00:20:00,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:00,433 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:00,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-08T00:20:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:00,434 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:00,435 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52632 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:00,435 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:20:00,435 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
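Editor's note: the ExploringCompactionPolicy lines above show a minor compaction being selected once four store files are eligible. A minimal configuration sketch of the knobs that commonly govern that selection; the values below are illustrative defaults, not settings read from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // (the store above had 4 eligible files, so a threshold of 3 is met).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on how many files a single minor compaction may pick up.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Size ratio used by ExploringCompactionPolicy when weighing candidate file sets.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

        System.out.println("min files to compact = " + conf.getInt("hbase.hstore.compactionThreshold", 3));
    }
}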
2024-12-08T00:20:00,435 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2ce7122e951f4aca96653673353b66eb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=51.4 K 2024-12-08T00:20:00,436 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ce7122e951f4aca96653673353b66eb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733617197677 2024-12-08T00:20:00,436 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:00,436 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:20:00,436 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:20:00,437 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/201fec840e7a41c09f3ddcebeb6fd2e4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=49.0 K 2024-12-08T00:20:00,437 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 201fec840e7a41c09f3ddcebeb6fd2e4, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733617197677 2024-12-08T00:20:00,437 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting bba088cde4fd422bbb1f622ef3dff888, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733617198323 2024-12-08T00:20:00,438 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 44a2cc0eae884d93906dba425efb5846, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733617198323 2024-12-08T00:20:00,438 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd2c1df64d38483095b81c45b60a62dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1733617198764 2024-12-08T00:20:00,438 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f10674e608904e2cbff33c3483ba8a43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1733617198764 2024-12-08T00:20:00,438 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6297d1924d4e4a3ebdc3d45c9452fa7c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:00,439 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b9af2301230c43ab9fde757001e2c137, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:00,463 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#99 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:00,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5000826884534879b553a1cb8b3fad47 is 50, key is test_row_0/B:col10/1733617199114/Put/seqid=0 2024-12-08T00:20:00,470 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#100 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:00,471 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/430086fe322f40079827c6865d1ecd1c is 50, key is test_row_0/A:col10/1733617199114/Put/seqid=0 2024-12-08T00:20:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:00,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:20:00,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:00,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/955fb05a0bfb465e9efacb15027c909f is 50, key is test_row_0/A:col10/1733617199817/Put/seqid=0 2024-12-08T00:20:00,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617260490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617260490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617260490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,501 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T00:20:00,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617260490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:00,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
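Editor's note: the Mutate calls rejected above keep hitting the same memstore blocking limit while the flush and compactions drain the region. A sketch of how a caller might back off and retry such writes; the table and row names come from the log, the retry cap and sleep are arbitrary, and with default client settings the RegionTooBusyException may be retried internally or surface wrapped in a retries-exhausted exception rather than directly as shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put); // rejected while the region memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (++attempts > 5) throw e;   // illustrative cap; real clients tune hbase.client.retries.number
                    Thread.sleep(100L * attempts); // simple linear backoff while the flush catches up
                }
            }
        }
    }
}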
2024-12-08T00:20:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741938_1114 (size=13425) 2024-12-08T00:20:00,537 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/5000826884534879b553a1cb8b3fad47 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5000826884534879b553a1cb8b3fad47 2024-12-08T00:20:00,546 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 5000826884534879b553a1cb8b3fad47(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:00,546 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:00,546 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=12, startTime=1733617200434; duration=0sec 2024-12-08T00:20:00,546 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:00,546 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:20:00,546 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:00,548 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:00,548 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:20:00,548 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:20:00,548 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/028745e00a8c4d3b93d6ad4f179f36bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.0 K 2024-12-08T00:20:00,549 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 028745e00a8c4d3b93d6ad4f179f36bc, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1733617198323 2024-12-08T00:20:00,549 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dccfbc395d1b47ebbcd5e9353f505cb6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1733617198764 2024-12-08T00:20:00,550 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e25ad532713b40819f1c846fe0e6d975, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:00,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741939_1115 (size=13425) 2024-12-08T00:20:00,582 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/430086fe322f40079827c6865d1ecd1c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/430086fe322f40079827c6865d1ecd1c 2024-12-08T00:20:00,583 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:00,584 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/b34677252a3147b1bed3999ceaef7942 is 50, key is test_row_0/C:col10/1733617199114/Put/seqid=0 2024-12-08T00:20:00,592 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 430086fe322f40079827c6865d1ecd1c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
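[Editor's note, illustrative only] The ExploringCompactionPolicy lines above report picking 3 store files (37,925 bytes total) for a minor compaction of store C. As a hedged illustration of the size-ratio rule that ratio-based selection applies, the standalone sketch below checks that no candidate file is larger than ratio times the combined size of the others; it is not the real HBase policy class, and the 1.2 ratio is only the commonly cited hbase.hstore.compaction.ratio default, assumed here.

    import java.util.List;

    // Simplified illustration of the size-ratio rule used by ratio-based minor
    // compaction selection. NOT the real ExploringCompactionPolicy.
    public class RatioSelectionSketch {
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          // Each file must be no larger than ratio * (sum of the other files),
          // otherwise one big file would dominate the rewrite cost.
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the three C-store files from the log: 13.0 K + 12.0 K + 12.0 K.
        List<Long> candidate = List.of(13_312L, 12_288L, 12_288L);
        System.out.println(withinRatio(candidate, 1.2)); // true -> eligible
      }
    }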
2024-12-08T00:20:00,592 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:00,592 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=12, startTime=1733617200433; duration=0sec 2024-12-08T00:20:00,593 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:00,593 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:20:00,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741940_1116 (size=14741) 2024-12-08T00:20:00,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/955fb05a0bfb465e9efacb15027c909f 2024-12-08T00:20:00,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b6bc62720ae84e78a7c9b62c73427edc is 50, key is test_row_0/B:col10/1733617199817/Put/seqid=0 2024-12-08T00:20:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617260599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617260600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617260599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617260603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741941_1117 (size=13425) 2024-12-08T00:20:00,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:00,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T00:20:00,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:00,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741942_1118 (size=12301) 2024-12-08T00:20:00,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b6bc62720ae84e78a7c9b62c73427edc 2024-12-08T00:20:00,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/460a97921ae54770b5ca84685b067a51 is 50, key is test_row_0/C:col10/1733617199817/Put/seqid=0 2024-12-08T00:20:00,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741943_1119 (size=12301) 2024-12-08T00:20:00,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/460a97921ae54770b5ca84685b067a51 2024-12-08T00:20:00,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/955fb05a0bfb465e9efacb15027c909f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f 2024-12-08T00:20:00,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f, entries=200, sequenceid=492, filesize=14.4 K 2024-12-08T00:20:00,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b6bc62720ae84e78a7c9b62c73427edc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc 2024-12-08T00:20:00,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc, entries=150, sequenceid=492, filesize=12.0 K 2024-12-08T00:20:00,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/460a97921ae54770b5ca84685b067a51 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51 2024-12-08T00:20:00,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51, entries=150, sequenceid=492, filesize=12.0 K 2024-12-08T00:20:00,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 309ms, sequenceid=492, compaction requested=false 2024-12-08T00:20:00,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:00,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T00:20:00,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:00,809 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:00,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:00,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:00,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/c64489cc39a74e6bae270594b4659167 is 50, key is test_row_0/A:col10/1733617200480/Put/seqid=0 2024-12-08T00:20:00,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741944_1120 (size=12301) 2024-12-08T00:20:00,862 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=503 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/c64489cc39a74e6bae270594b4659167 2024-12-08T00:20:00,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617260868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617260869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617260869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617260870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/52eae42e046c4a75b90199db3f8bb77a is 50, key is test_row_0/B:col10/1733617200480/Put/seqid=0 2024-12-08T00:20:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741945_1121 (size=12301) 2024-12-08T00:20:00,896 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=503 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/52eae42e046c4a75b90199db3f8bb77a 2024-12-08T00:20:00,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/908ab1a013ed4c8c9211d1ef1dbfdff8 is 50, key is test_row_0/C:col10/1733617200480/Put/seqid=0 2024-12-08T00:20:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741946_1122 (size=12301) 2024-12-08T00:20:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617260973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617260974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617260976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:00,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617260977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,038 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/b34677252a3147b1bed3999ceaef7942 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b34677252a3147b1bed3999ceaef7942 2024-12-08T00:20:01,047 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into b34677252a3147b1bed3999ceaef7942(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
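[Editor's note, illustrative only] The repeated RegionTooBusyException warnings in this stretch are the region server pushing writers back while the region's memstore sits above its blocking limit and the flush catches up. The normal HBase client retries this exception internally; the sketch below makes that pushback visible with an explicit retry loop around Table.put. Row, family, and qualifier names come from the log keys (test_row_0, A, col10); the retry count, sleep, and value are arbitrary assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch of a caller backing off on RegionTooBusyException.
    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // Memstore above its blocking limit; wait for the flush to catch up.
              Thread.sleep(200L * attempt);
            }
          }
        }
      }
    }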
2024-12-08T00:20:01,047 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:01,047 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617200434; duration=0sec 2024-12-08T00:20:01,047 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:01,047 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:20:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617261176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617261177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617261182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617261183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,339 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=503 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/908ab1a013ed4c8c9211d1ef1dbfdff8 2024-12-08T00:20:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/c64489cc39a74e6bae270594b4659167 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167 2024-12-08T00:20:01,365 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167, entries=150, sequenceid=503, filesize=12.0 K 2024-12-08T00:20:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/52eae42e046c4a75b90199db3f8bb77a as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a 2024-12-08T00:20:01,375 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a, entries=150, sequenceid=503, filesize=12.0 K 2024-12-08T00:20:01,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/908ab1a013ed4c8c9211d1ef1dbfdff8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8 2024-12-08T00:20:01,384 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8, entries=150, sequenceid=503, filesize=12.0 K 2024-12-08T00:20:01,385 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 576ms, sequenceid=503, compaction requested=true 2024-12-08T00:20:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
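[Editor's note, illustrative only] The "Over memstore limit=512.0 K" figure in the warnings is the per-region blocking threshold, i.e. the configured memstore flush size multiplied by the block multiplier; the tiny value suggests the test runs with a deliberately small flush size, which is an assumption since the configuration itself is not shown in this section. A minimal sketch of that arithmetic, using the usual configuration keys with stock defaults only as fallbacks:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of how the per-region blocking threshold in the warnings above is
    // derived: flush size * block multiplier.
    public class BlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        System.out.println("Writes block once the region memstore exceeds " + blockingLimit + " bytes");
      }
    }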
2024-12-08T00:20:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-08T00:20:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-08T00:20:01,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-08T00:20:01,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3510 sec 2024-12-08T00:20:01,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.3570 sec 2024-12-08T00:20:01,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-08T00:20:01,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:01,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:01,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:01,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617261493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617261494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617261494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617261495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/94af88b184844c54a18e270d9922e48f is 50, key is test_row_0/A:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741947_1123 (size=12301) 2024-12-08T00:20:01,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617261597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617261597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617261598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617261601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617261800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617261804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617261806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:01,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617261806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:01,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/94af88b184844c54a18e270d9922e48f 2024-12-08T00:20:01,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f7cb0d068a954bdd9b8241ddfeedfc17 is 50, key is test_row_0/B:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:02,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741948_1124 (size=12301) 2024-12-08T00:20:02,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f7cb0d068a954bdd9b8241ddfeedfc17 2024-12-08T00:20:02,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52398 deadline: 1733617262030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,032 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:02,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/aefaddc7b3f649ec822d47ebce71a491 is 50, key is test_row_0/C:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741949_1125 (size=12301) 2024-12-08T00:20:02,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 
KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/aefaddc7b3f649ec822d47ebce71a491 2024-12-08T00:20:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/94af88b184844c54a18e270d9922e48f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f 2024-12-08T00:20:02,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f, entries=150, sequenceid=532, filesize=12.0 K 2024-12-08T00:20:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/f7cb0d068a954bdd9b8241ddfeedfc17 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17 2024-12-08T00:20:02,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17, entries=150, sequenceid=532, filesize=12.0 K 2024-12-08T00:20:02,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/aefaddc7b3f649ec822d47ebce71a491 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491 2024-12-08T00:20:02,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491, entries=150, sequenceid=532, filesize=12.0 K 2024-12-08T00:20:02,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 618ms, sequenceid=532, compaction requested=true 2024-12-08T00:20:02,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:02,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:02,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:02,104 DEBUG 
[RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:02,104 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:02,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:02,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:02,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:02,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:02,109 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:02,110 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:20:02,110 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
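The "Exploring compaction algorithm has selected 4 files ... in ratio" lines above, together with the Compactor records that follow, show ExploringCompactionPolicy picking all four store files for each family (13.1 K + 14.4 K + 12.0 K + 12.0 K, about 51.5 K for A). As a rough stand-alone illustration of what "in ratio" means, the sketch below checks that no single file is larger than the compaction ratio times the combined size of the others. It is not the HBase implementation, which also enforces min/max file counts, size limits, and off-peak ratios; the 1.2 ratio is the assumed default and the byte sizes are approximations of the files listed in the surrounding records.

    // Simplified illustration of the "in ratio" test: a selection passes when no
    // single file dominates the combined size of the rest by more than `ratio`.
    import java.util.List;

    public class RatioCheck {
      static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > (total - size) * ratio) {
            return false; // one file is too large relative to the others
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate sizes of the four A-store files (13.1 K, 14.4 K, 12.0 K, 12.0 K).
        List<Long> sizes = List.of(13_414L, 14_745L, 12_288L, 12_288L);
        System.out.println(filesInRatio(sizes, 1.2)); // true
      }
    }

With these sizes every file passes the check, which is why the whole set is compacted in one pass rather than being split into smaller selections.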
2024-12-08T00:20:02,110 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/430086fe322f40079827c6865d1ecd1c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=51.5 K 2024-12-08T00:20:02,110 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:02,110 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:20:02,110 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,111 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5000826884534879b553a1cb8b3fad47, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=49.1 K 2024-12-08T00:20:02,111 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 430086fe322f40079827c6865d1ecd1c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:02,111 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5000826884534879b553a1cb8b3fad47, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:02,112 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 955fb05a0bfb465e9efacb15027c909f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=492, 
earliestPutTs=1733617199817 2024-12-08T00:20:02,112 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b6bc62720ae84e78a7c9b62c73427edc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1733617199817 2024-12-08T00:20:02,112 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c64489cc39a74e6bae270594b4659167, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=503, earliestPutTs=1733617200480 2024-12-08T00:20:02,113 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 52eae42e046c4a75b90199db3f8bb77a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=503, earliestPutTs=1733617200480 2024-12-08T00:20:02,113 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94af88b184844c54a18e270d9922e48f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:02,113 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f7cb0d068a954bdd9b8241ddfeedfc17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:02,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:02,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:20:02,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:02,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,131 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:02,132 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6730f7d1a5c3491badc1d08c402c0e2b is 50, key is test_row_0/B:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:02,145 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#112 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T00:20:02,146 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-08T00:20:02,146 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/52cf7090a8304833b67185b66b8fd5fc is 50, key is test_row_0/A:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:02,147 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:02,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e3f9823138ff4b6dbdd8741edb5b9fdd is 50, key is test_row_0/A:col10/1733617202122/Put/seqid=0 2024-12-08T00:20:02,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-08T00:20:02,150 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:02,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:02,152 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:02,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:02,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741950_1126 (size=13561) 2024-12-08T00:20:02,170 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6730f7d1a5c3491badc1d08c402c0e2b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6730f7d1a5c3491badc1d08c402c0e2b 2024-12-08T00:20:02,176 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 6730f7d1a5c3491badc1d08c402c0e2b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
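The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" record above is the admin-initiated table flush that the master turns into FlushTableProcedure pid=28 with one FlushRegionProcedure child per region, the same shape as the pid=26/27 pair that just completed. A minimal sketch of issuing that request from a client, assuming a reachable cluster with default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master runs it as a
          // FlushTableProcedure with FlushRegionProcedure children, as seen in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated "Checking to see if procedure is done pid=..." records are the client-side future polling the master for the procedure result, which is when HBaseAdmin$TableFuture logs the FLUSH operation as completed.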
2024-12-08T00:20:02,176 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:02,177 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=12, startTime=1733617202104; duration=0sec 2024-12-08T00:20:02,177 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:02,177 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:20:02,177 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:02,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617262170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617262171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617262172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,178 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:02,178 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:20:02,178 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,179 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b34677252a3147b1bed3999ceaef7942, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=49.1 K 2024-12-08T00:20:02,180 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b34677252a3147b1bed3999ceaef7942, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1733617199100 2024-12-08T00:20:02,180 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 460a97921ae54770b5ca84685b067a51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1733617199817 2024-12-08T00:20:02,181 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 908ab1a013ed4c8c9211d1ef1dbfdff8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=503, earliestPutTs=1733617200480 2024-12-08T00:20:02,181 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 
{}] compactions.Compactor(224): Compacting aefaddc7b3f649ec822d47ebce71a491, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:02,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617262176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741951_1127 (size=13561) 2024-12-08T00:20:02,205 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/52cf7090a8304833b67185b66b8fd5fc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/52cf7090a8304833b67185b66b8fd5fc 2024-12-08T00:20:02,209 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#114 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:02,209 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/43b2105185dd43ccbd21d931abc68267 is 50, key is test_row_0/C:col10/1733617200867/Put/seqid=0 2024-12-08T00:20:02,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741952_1128 (size=17181) 2024-12-08T00:20:02,212 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 52cf7090a8304833b67185b66b8fd5fc(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:02,212 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:02,212 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=12, startTime=1733617202104; duration=0sec 2024-12-08T00:20:02,212 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:02,212 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:20:02,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741953_1129 (size=13561) 2024-12-08T00:20:02,244 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/43b2105185dd43ccbd21d931abc68267 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/43b2105185dd43ccbd21d931abc68267 2024-12-08T00:20:02,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:02,267 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 43b2105185dd43ccbd21d931abc68267(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:02,267 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:02,267 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=12, startTime=1733617202105; duration=0sec 2024-12-08T00:20:02,268 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:02,268 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:20:02,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617262278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617262279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617262279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617262284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:02,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:02,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:02,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:02,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617262482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617262483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617262485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617262488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e3f9823138ff4b6dbdd8741edb5b9fdd 2024-12-08T00:20:02,613 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:02,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:02,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/531aff85df194ab7ba3a171fb7020d05 is 50, key is test_row_0/B:col10/1733617202122/Put/seqid=0 2024-12-08T00:20:02,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741954_1130 (size=12301) 2024-12-08T00:20:02,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/531aff85df194ab7ba3a171fb7020d05 2024-12-08T00:20:02,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1cf7e5c858194411b4ec57eb9641163c is 50, key is test_row_0/C:col10/1733617202122/Put/seqid=0 2024-12-08T00:20:02,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741955_1131 (size=12301) 2024-12-08T00:20:02,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1cf7e5c858194411b4ec57eb9641163c 2024-12-08T00:20:02,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 
2024-12-08T00:20:02,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e3f9823138ff4b6dbdd8741edb5b9fdd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd 2024-12-08T00:20:02,767 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:02,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:02,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:02,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd, entries=250, sequenceid=544, filesize=16.8 K 2024-12-08T00:20:02,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/531aff85df194ab7ba3a171fb7020d05 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05 2024-12-08T00:20:02,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05, entries=150, sequenceid=544, filesize=12.0 K 2024-12-08T00:20:02,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/1cf7e5c858194411b4ec57eb9641163c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c 2024-12-08T00:20:02,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c, entries=150, sequenceid=544, filesize=12.0 K 2024-12-08T00:20:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617262784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 663ms, sequenceid=544, compaction requested=false 2024-12-08T00:20:02,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:02,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:20:02,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:02,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:02,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:02,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:02,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3804da64924240e1b7b6039917e4e026 is 50, key is test_row_0/A:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:02,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617262807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617262807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617262808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741956_1132 (size=17181) 2024-12-08T00:20:02,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617262916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617262916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:02,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617262916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:02,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:02,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:02,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:02,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:02,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:03,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:03,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:03,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:03,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617263119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617263121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617263121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. as already flushing 2024-12-08T00:20:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:03,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:03,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3804da64924240e1b7b6039917e4e026 2024-12-08T00:20:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:03,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b172fdd37ad24c0c8627b95683df1af2 is 50, key is test_row_0/B:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741957_1133 (size=12301) 2024-12-08T00:20:03,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b172fdd37ad24c0c8627b95683df1af2 2024-12-08T00:20:03,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5b6fee2eb08249928c46086e3cb18c01 is 50, key is test_row_0/C:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:03,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617263290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741958_1134 (size=12301) 2024-12-08T00:20:03,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5b6fee2eb08249928c46086e3cb18c01 2024-12-08T00:20:03,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/3804da64924240e1b7b6039917e4e026 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026 2024-12-08T00:20:03,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026, entries=250, sequenceid=573, filesize=16.8 K 2024-12-08T00:20:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/b172fdd37ad24c0c8627b95683df1af2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2 2024-12-08T00:20:03,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2, entries=150, sequenceid=573, filesize=12.0 K 2024-12-08T00:20:03,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/5b6fee2eb08249928c46086e3cb18c01 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01 2024-12-08T00:20:03,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01, entries=150, sequenceid=573, filesize=12.0 K 2024-12-08T00:20:03,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 534ms, sequenceid=573, compaction requested=true 2024-12-08T00:20:03,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:03,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:03,326 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:03,326 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:03,327 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:03,327 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:20:03,327 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:20:03,327 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6730f7d1a5c3491badc1d08c402c0e2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.3 K 2024-12-08T00:20:03,328 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47923 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:03,328 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:20:03,328 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:03,328 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/52cf7090a8304833b67185b66b8fd5fc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=46.8 K 2024-12-08T00:20:03,328 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6730f7d1a5c3491badc1d08c402c0e2b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:03,330 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52cf7090a8304833b67185b66b8fd5fc, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:03,330 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 531aff85df194ab7ba3a171fb7020d05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=544, earliestPutTs=1733617201493 2024-12-08T00:20:03,330 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3f9823138ff4b6dbdd8741edb5b9fdd, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=544, earliestPutTs=1733617201493 2024-12-08T00:20:03,332 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 3804da64924240e1b7b6039917e4e026, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202169 2024-12-08T00:20:03,332 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b172fdd37ad24c0c8627b95683df1af2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202174 2024-12-08T00:20:03,345 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#120 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:03,345 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/9096e6397f1d4a08ac404789f917a341 is 50, key is test_row_0/A:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:03,346 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:03,347 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/4195ec84c5b54f74ace2d4da36080fc7 is 50, key is test_row_0/B:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:03,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741959_1135 (size=13663) 2024-12-08T00:20:03,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741960_1136 (size=13663) 2024-12-08T00:20:03,368 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/4195ec84c5b54f74ace2d4da36080fc7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/4195ec84c5b54f74ace2d4da36080fc7 2024-12-08T00:20:03,370 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/9096e6397f1d4a08ac404789f917a341 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9096e6397f1d4a08ac404789f917a341 2024-12-08T00:20:03,379 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 4195ec84c5b54f74ace2d4da36080fc7(size=13.3 K), total size for store is 13.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:03,379 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:03,379 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617203326; duration=0sec 2024-12-08T00:20:03,379 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 9096e6397f1d4a08ac404789f917a341(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:03,379 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:03,379 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:03,379 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:20:03,379 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617203325; duration=0sec 2024-12-08T00:20:03,380 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:03,380 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:03,380 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:20:03,381 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:03,381 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:20:03,381 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:20:03,381 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/43b2105185dd43ccbd21d931abc68267, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.3 K 2024-12-08T00:20:03,381 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 43b2105185dd43ccbd21d931abc68267, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1733617200866 2024-12-08T00:20:03,382 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cf7e5c858194411b4ec57eb9641163c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=544, earliestPutTs=1733617201493 2024-12-08T00:20:03,382 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b6fee2eb08249928c46086e3cb18c01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202174 2024-12-08T00:20:03,393 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T00:20:03,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
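The FlushRegionCallable entry (pid=29) is the region-server half of the master-driven flush procedure that finishes later as pid=29/28, and the RegionTooBusyException warnings that follow it are the backpressure a writer sees while the region's memstore is over its blocking limit. Below is a hedged client-side sketch of that interaction: a put that backs off on RegionTooBusyException plus an explicit Admin.flush. The row/family/qualifier mirror the test's key layout (test_row_0, A:col10) but are placeholders here, the retry policy is illustrative only, and depending on client retry configuration the exception may surface wrapped rather than directly as caught below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: a put that backs off when the server rejects it with
    // RegionTooBusyException ("Over memstore limit" in the surrounding log),
    // followed by an explicit flush, roughly what the FlushTableProcedure in
    // this log drives on the server side. Names and values are placeholders.
    public class BusyRegionClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                        // write accepted
            } catch (RegionTooBusyException busy) {
              Thread.sleep(200L << attempt); // back off while the memstore drains
            }
          }
          admin.flush(tn);                  // force a flush, as the procedure does
        }
      }
    }
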
2024-12-08T00:20:03,394 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:20:03,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:03,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:03,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4a3ccf1af1d84af9ace55738e18a9d63 is 50, key is test_row_0/A:col10/1733617202805/Put/seqid=0 2024-12-08T00:20:03,402 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#122 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:03,403 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/dd9308f78d924a5ba5edba4a73892050 is 50, key is test_row_0/C:col10/1733617202789/Put/seqid=0 2024-12-08T00:20:03,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741961_1137 (size=13663) 2024-12-08T00:20:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
as already flushing 2024-12-08T00:20:03,426 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/dd9308f78d924a5ba5edba4a73892050 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dd9308f78d924a5ba5edba4a73892050 2024-12-08T00:20:03,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741962_1138 (size=12301) 2024-12-08T00:20:03,438 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=585 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4a3ccf1af1d84af9ace55738e18a9d63 2024-12-08T00:20:03,440 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into dd9308f78d924a5ba5edba4a73892050(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:03,440 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:03,440 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617203326; duration=0sec 2024-12-08T00:20:03,440 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:03,440 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:20:03,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/cbca30b781ae4a7a880f87d71b39e663 is 50, key is test_row_0/B:col10/1733617202805/Put/seqid=0 2024-12-08T00:20:03,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741963_1139 (size=12301) 2024-12-08T00:20:03,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617263474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617263475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617263481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617263579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617263581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617263584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617263782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617263784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:03,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617263786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:03,877 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=585 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/cbca30b781ae4a7a880f87d71b39e663 2024-12-08T00:20:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/f0e3e6500b194014a13f9d5f56c3b305 is 50, key is test_row_0/C:col10/1733617202805/Put/seqid=0 2024-12-08T00:20:03,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741964_1140 (size=12301) 2024-12-08T00:20:04,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617264085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617264088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617264091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:04,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52366 deadline: 1733617264295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,319 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=585 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/f0e3e6500b194014a13f9d5f56c3b305 2024-12-08T00:20:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/4a3ccf1af1d84af9ace55738e18a9d63 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63 2024-12-08T00:20:04,337 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63, entries=150, sequenceid=585, filesize=12.0 K 2024-12-08T00:20:04,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/cbca30b781ae4a7a880f87d71b39e663 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663 2024-12-08T00:20:04,344 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663, entries=150, sequenceid=585, filesize=12.0 K 2024-12-08T00:20:04,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/f0e3e6500b194014a13f9d5f56c3b305 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305 2024-12-08T00:20:04,351 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305, entries=150, sequenceid=585, filesize=12.0 K 2024-12-08T00:20:04,352 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 957ms, sequenceid=585, compaction requested=false 2024-12-08T00:20:04,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:04,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:04,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-08T00:20:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-08T00:20:04,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-08T00:20:04,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2020 sec 2024-12-08T00:20:04,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.2100 sec 2024-12-08T00:20:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:04,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:04,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:04,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e555e56aada741a4ac6621043f7b9aa7 is 50, key is test_row_0/A:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:04,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617264615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617264615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617264616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741965_1141 (size=12301) 2024-12-08T00:20:04,704 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:62287 2024-12-08T00:20:04,704 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:04,705 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:62287 2024-12-08T00:20:04,705 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:04,708 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:62287 2024-12-08T00:20:04,709 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:04,710 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:62287 2024-12-08T00:20:04,710 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617264717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617264719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617264723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617264919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617264921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:04,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617264924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:05,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=615 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e555e56aada741a4ac6621043f7b9aa7 2024-12-08T00:20:05,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/de8b5950ff6647b9aafea1b8a406479a is 50, key is test_row_0/B:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:05,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741966_1142 (size=12301) 2024-12-08T00:20:05,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=615 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/de8b5950ff6647b9aafea1b8a406479a 2024-12-08T00:20:05,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/897b2018c0604e7c9a7814a18116a4f3 is 50, key is test_row_0/C:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:05,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741967_1143 (size=12301) 2024-12-08T00:20:05,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:05,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:05,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52350 deadline: 1733617265223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:05,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52410 deadline: 1733617265223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:05,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:05,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52342 deadline: 1733617265228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:05,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=615 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/897b2018c0604e7c9a7814a18116a4f3 2024-12-08T00:20:05,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/e555e56aada741a4ac6621043f7b9aa7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7 2024-12-08T00:20:05,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7, entries=150, sequenceid=615, filesize=12.0 K 2024-12-08T00:20:05,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/de8b5950ff6647b9aafea1b8a406479a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a 2024-12-08T00:20:05,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a, entries=150, sequenceid=615, filesize=12.0 K 2024-12-08T00:20:05,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/897b2018c0604e7c9a7814a18116a4f3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3 2024-12-08T00:20:05,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3, entries=150, sequenceid=615, filesize=12.0 K 2024-12-08T00:20:05,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 916ms, sequenceid=615, compaction requested=true 2024-12-08T00:20:05,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:05,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:05,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:05,511 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:05,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:05,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:05,512 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:05,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f51bdc360ee4fbe2f9447c9b6b4bf1ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:05,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:05,513 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:05,513 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:05,513 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/A is initiating minor compaction (all files) 2024-12-08T00:20:05,513 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/B is initiating minor compaction (all files) 2024-12-08T00:20:05,513 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/A in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:05,513 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/B in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:05,513 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/4195ec84c5b54f74ace2d4da36080fc7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.4 K 2024-12-08T00:20:05,513 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9096e6397f1d4a08ac404789f917a341, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.4 K 2024-12-08T00:20:05,513 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9096e6397f1d4a08ac404789f917a341, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202174 2024-12-08T00:20:05,513 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4195ec84c5b54f74ace2d4da36080fc7, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202174 2024-12-08T00:20:05,514 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a3ccf1af1d84af9ace55738e18a9d63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=585, earliestPutTs=1733617202805 2024-12-08T00:20:05,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting cbca30b781ae4a7a880f87d71b39e663, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=585, earliestPutTs=1733617202805 2024-12-08T00:20:05,514 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
e555e56aada741a4ac6621043f7b9aa7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=615, earliestPutTs=1733617203473 2024-12-08T00:20:05,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting de8b5950ff6647b9aafea1b8a406479a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=615, earliestPutTs=1733617203473 2024-12-08T00:20:05,523 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:05,523 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/9402fe6c86c9431dbee59e7bf4c6c8f3 is 50, key is test_row_0/B:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:05,525 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#A#compaction#130 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:05,525 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/8a63aac20cd846dd9c93b520e435858c is 50, key is test_row_0/A:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:05,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741968_1144 (size=13765) 2024-12-08T00:20:05,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741969_1145 (size=13765) 2024-12-08T00:20:05,534 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/8a63aac20cd846dd9c93b520e435858c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8a63aac20cd846dd9c93b520e435858c 2024-12-08T00:20:05,539 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/A of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 8a63aac20cd846dd9c93b520e435858c(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:05,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:05,539 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, priority=13, startTime=1733617205511; duration=0sec 2024-12-08T00:20:05,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:05,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:A 2024-12-08T00:20:05,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:05,540 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:05,540 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): f51bdc360ee4fbe2f9447c9b6b4bf1ce/C is initiating minor compaction (all files) 2024-12-08T00:20:05,540 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f51bdc360ee4fbe2f9447c9b6b4bf1ce/C in TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:05,540 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dd9308f78d924a5ba5edba4a73892050, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp, totalSize=37.4 K 2024-12-08T00:20:05,541 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd9308f78d924a5ba5edba4a73892050, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1733617202174 2024-12-08T00:20:05,541 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0e3e6500b194014a13f9d5f56c3b305, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=585, earliestPutTs=1733617202805 2024-12-08T00:20:05,542 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 897b2018c0604e7c9a7814a18116a4f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=615, earliestPutTs=1733617203473 2024-12-08T00:20:05,548 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): f51bdc360ee4fbe2f9447c9b6b4bf1ce#C#compaction#131 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:05,549 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/7d13e8bef5a64c3da0137c93fb7bd954 is 50, key is test_row_0/C:col10/1733617204594/Put/seqid=0 2024-12-08T00:20:05,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741970_1146 (size=13765) 2024-12-08T00:20:05,728 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:62287 2024-12-08T00:20:05,729 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:05,731 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:62287 2024-12-08T00:20:05,731 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:05,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:05,733 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:62287 2024-12-08T00:20:05,733 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:05,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:05,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:05,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:05,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:05,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:05,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:05,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:05,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/65ed81ef152d46e9b84a7531a3679e41 is 50, key is test_row_0/A:col10/1733617205731/Put/seqid=0 2024-12-08T00:20:05,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741971_1147 (size=12301) 2024-12-08T00:20:05,934 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/9402fe6c86c9431dbee59e7bf4c6c8f3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/9402fe6c86c9431dbee59e7bf4c6c8f3 2024-12-08T00:20:05,940 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/B of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 9402fe6c86c9431dbee59e7bf4c6c8f3(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:05,940 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:05,940 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, priority=13, startTime=1733617205511; duration=0sec 2024-12-08T00:20:05,940 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:05,940 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:B 2024-12-08T00:20:05,958 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/7d13e8bef5a64c3da0137c93fb7bd954 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/7d13e8bef5a64c3da0137c93fb7bd954 2024-12-08T00:20:05,964 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f51bdc360ee4fbe2f9447c9b6b4bf1ce/C of f51bdc360ee4fbe2f9447c9b6b4bf1ce into 7d13e8bef5a64c3da0137c93fb7bd954(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:05,964 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:05,964 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce., storeName=f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, priority=13, startTime=1733617205512; duration=0sec 2024-12-08T00:20:05,964 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:05,964 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f51bdc360ee4fbe2f9447c9b6b4bf1ce:C 2024-12-08T00:20:06,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=627 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/65ed81ef152d46e9b84a7531a3679e41 2024-12-08T00:20:06,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6fdafc79d97d42d4b9915e960fb49d0f is 50, key is test_row_0/B:col10/1733617205731/Put/seqid=0 2024-12-08T00:20:06,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741972_1148 (size=12301) 2024-12-08T00:20:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T00:20:06,257 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-08T00:20:06,310 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:62287 2024-12-08T00:20:06,310 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:06,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=627 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6fdafc79d97d42d4b9915e960fb49d0f 2024-12-08T00:20:06,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0ba7c3e255c74cc38c393b8fc8fdcd20 is 50, key is test_row_0/C:col10/1733617205731/Put/seqid=0 2024-12-08T00:20:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741973_1149 (size=12301) 2024-12-08T00:20:06,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=627 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0ba7c3e255c74cc38c393b8fc8fdcd20 2024-12-08T00:20:06,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/65ed81ef152d46e9b84a7531a3679e41 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/65ed81ef152d46e9b84a7531a3679e41 2024-12-08T00:20:07,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/65ed81ef152d46e9b84a7531a3679e41, entries=150, sequenceid=627, filesize=12.0 K 2024-12-08T00:20:07,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/6fdafc79d97d42d4b9915e960fb49d0f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fdafc79d97d42d4b9915e960fb49d0f 2024-12-08T00:20:07,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fdafc79d97d42d4b9915e960fb49d0f, entries=150, sequenceid=627, filesize=12.0 K 2024-12-08T00:20:07,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/0ba7c3e255c74cc38c393b8fc8fdcd20 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0ba7c3e255c74cc38c393b8fc8fdcd20 2024-12-08T00:20:07,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0ba7c3e255c74cc38c393b8fc8fdcd20, entries=150, sequenceid=627, filesize=12.0 K 2024-12-08T00:20:07,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=6.71 KB/6870 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1275ms, sequenceid=627, compaction requested=false 2024-12-08T00:20:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:07,704 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T00:20:12,038 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:62287 2024-12-08T00:20:12,038 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:12,038 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 115 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 114 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4249 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4170 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1887 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5661 rows 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1888 2024-12-08T00:20:12,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5662 rows 2024-12-08T00:20:12,039 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:20:12,039 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:62287 2024-12-08T00:20:12,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:12,046 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T00:20:12,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T00:20:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:12,057 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617212057"}]},"ts":"1733617212057"} 2024-12-08T00:20:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:12,058 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T00:20:12,061 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T00:20:12,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:20:12,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, UNASSIGN}] 2024-12-08T00:20:12,067 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, UNASSIGN 2024-12-08T00:20:12,068 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=f51bdc360ee4fbe2f9447c9b6b4bf1ce, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:12,069 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:20:12,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:12,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:12,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:12,226 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:12,226 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:20:12,227 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing f51bdc360ee4fbe2f9447c9b6b4bf1ce, disabling compactions & flushes 2024-12-08T00:20:12,227 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:12,227 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:12,227 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. after waiting 0 ms 2024-12-08T00:20:12,227 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 
2024-12-08T00:20:12,227 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing f51bdc360ee4fbe2f9447c9b6b4bf1ce 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=A 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=B 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f51bdc360ee4fbe2f9447c9b6b4bf1ce, store=C 2024-12-08T00:20:12,228 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:12,233 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/b5433650ada84ed8af920c746ec1d0ac is 50, key is test_row_1/A:col10/1733617206309/Put/seqid=0 2024-12-08T00:20:12,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741974_1150 (size=9857) 2024-12-08T00:20:12,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:12,639 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=634 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/b5433650ada84ed8af920c746ec1d0ac 2024-12-08T00:20:12,649 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/946218c86d2b40b88760838acc32f4e7 is 50, key is test_row_1/B:col10/1733617206309/Put/seqid=0 2024-12-08T00:20:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741975_1151 (size=9857) 2024-12-08T00:20:12,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:13,016 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:20:13,018 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:20:13,054 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=634 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/946218c86d2b40b88760838acc32f4e7 2024-12-08T00:20:13,062 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d881dd8234104bc0beffe893754d3f86 is 50, key is test_row_1/C:col10/1733617206309/Put/seqid=0 2024-12-08T00:20:13,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741976_1152 (size=9857) 2024-12-08T00:20:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:13,466 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=634 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d881dd8234104bc0beffe893754d3f86 2024-12-08T00:20:13,471 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/A/b5433650ada84ed8af920c746ec1d0ac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/b5433650ada84ed8af920c746ec1d0ac 2024-12-08T00:20:13,475 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/b5433650ada84ed8af920c746ec1d0ac, entries=100, sequenceid=634, filesize=9.6 K 2024-12-08T00:20:13,476 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/B/946218c86d2b40b88760838acc32f4e7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/946218c86d2b40b88760838acc32f4e7 2024-12-08T00:20:13,479 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/946218c86d2b40b88760838acc32f4e7, entries=100, sequenceid=634, filesize=9.6 K 2024-12-08T00:20:13,480 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/.tmp/C/d881dd8234104bc0beffe893754d3f86 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d881dd8234104bc0beffe893754d3f86 2024-12-08T00:20:13,484 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d881dd8234104bc0beffe893754d3f86, entries=100, sequenceid=634, filesize=9.6 K 2024-12-08T00:20:13,484 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for f51bdc360ee4fbe2f9447c9b6b4bf1ce in 1257ms, sequenceid=634, compaction requested=true 2024-12-08T00:20:13,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/0a9061e76bda4f7bb2b81688e43f7c9d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a46dcd7e950e43d9992bad05849d0163, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c9235e68ccd647b0b8fdab6476adebe9, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4fcdb696a5c04d1484053e5cf90c77c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/66ce41fe24bd407fb9a5898663514c9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/02af179e9cd246c19f3749cd3edf85ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/28adb39d75bd4e3db3d9960ac8945480, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9d84716e837a4ea990ebc01244fdfc12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2ce7122e951f4aca96653673353b66eb, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/430086fe322f40079827c6865d1ecd1c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/52cf7090a8304833b67185b66b8fd5fc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9096e6397f1d4a08ac404789f917a341, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7] to archive 2024-12-08T00:20:13,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
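The archive entries that follow show the StoreCloser handing each compacted store file to HFileArchiver, which relocates it from the region's data directory to a mirrored location under archive/. As a rough illustration of that path mapping only (the class and helper below are hypothetical, not the actual HBase implementation), the following sketch reproduces the data/ -> archive/data/ transformation visible in these "Archived from FileableStoreFile, ... to ..." lines:

```java
// Illustrative sketch only: mirrors the data/ -> archive/data/ path mapping seen
// in the HFileArchiver log entries below. This is NOT HBase's implementation;
// the class name and helper are assumptions made for clarity.
public class ArchivePathSketch {

    /**
     * Given a store file path of the form
     *   hdfs://host:port/<root>/data/default/<table>/<region>/<family>/<hfile>
     * return the corresponding archive location
     *   hdfs://host:port/<root>/archive/data/default/<table>/<region>/<family>/<hfile>
     */
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("not a store file path: " + storeFilePath);
        }
        // Insert "archive" between the root directory and the data/ subtree.
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        // Source path taken verbatim from the first archived file in the log below.
        String src = "hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3"
            + "/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4";
        // Prints the same target path that the corresponding "Archived ... to ..." entry records.
        System.out.println(toArchivePath(src));
    }
}
```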
2024-12-08T00:20:13,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a2ccce83506a44f0880725901a10cce4 2024-12-08T00:20:13,496 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/27cd56b0e84645e682fd1bd7f7c70b20 2024-12-08T00:20:13,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4ebb49454477423c9c9f8d2357344e45 2024-12-08T00:20:13,498 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/0a9061e76bda4f7bb2b81688e43f7c9d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/0a9061e76bda4f7bb2b81688e43f7c9d 2024-12-08T00:20:13,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/906f61f5d8ed43428cee8080ab3600d9 2024-12-08T00:20:13,501 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a84c4b1734af4a1e94767cd7f993710e 2024-12-08T00:20:13,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/33f5fe8267cb4457bf076730ceb4a7c4 2024-12-08T00:20:13,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a46dcd7e950e43d9992bad05849d0163 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a46dcd7e950e43d9992bad05849d0163 2024-12-08T00:20:13,505 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/93290e03f0ae4b45aae4a8ead8e14b01 2024-12-08T00:20:13,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c9235e68ccd647b0b8fdab6476adebe9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c9235e68ccd647b0b8fdab6476adebe9 2024-12-08T00:20:13,507 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2d9c19bcbf204979882daa2807f28543 2024-12-08T00:20:13,508 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fb6534abf1af4a84b99b61d6856ccb8f 2024-12-08T00:20:13,509 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/67be1088180440c68f2c4cc737509903 2024-12-08T00:20:13,511 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4fcdb696a5c04d1484053e5cf90c77c2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4fcdb696a5c04d1484053e5cf90c77c2 2024-12-08T00:20:13,512 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38fda6f06e754de4844907e9b1b8d450 2024-12-08T00:20:13,513 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3ce62155b8fa4017b67e85bf5d34d30d 2024-12-08T00:20:13,515 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e6930d40c316458e96a1eb28692e6e7b 2024-12-08T00:20:13,516 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/66ce41fe24bd407fb9a5898663514c9f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/66ce41fe24bd407fb9a5898663514c9f 2024-12-08T00:20:13,517 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/38d5c2aac3cf4d66bb46a5560754c4be 2024-12-08T00:20:13,518 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/a9edcfb5e3e04c009e7cd960c9bf6a3b 2024-12-08T00:20:13,520 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/02af179e9cd246c19f3749cd3edf85ae to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/02af179e9cd246c19f3749cd3edf85ae 2024-12-08T00:20:13,521 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/96350ff26757466db694d8a36703c034 2024-12-08T00:20:13,522 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/28adb39d75bd4e3db3d9960ac8945480 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/28adb39d75bd4e3db3d9960ac8945480 2024-12-08T00:20:13,523 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/fc54cc31fcc94a849392b16d44f09485 2024-12-08T00:20:13,525 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8cd98ebeb19b44fab912e8a0a098b984 2024-12-08T00:20:13,526 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9d84716e837a4ea990ebc01244fdfc12 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9d84716e837a4ea990ebc01244fdfc12 2024-12-08T00:20:13,527 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/7228392654814d52a611c1f3fc840986 2024-12-08T00:20:13,529 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/72260f70a227487faf798be4720d71c9 2024-12-08T00:20:13,530 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2ce7122e951f4aca96653673353b66eb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/2ce7122e951f4aca96653673353b66eb 2024-12-08T00:20:13,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/68b9eb5136d44b618618266bfaba04f8 2024-12-08T00:20:13,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/bba088cde4fd422bbb1f622ef3dff888 2024-12-08T00:20:13,534 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/dd2c1df64d38483095b81c45b60a62dd 2024-12-08T00:20:13,535 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/6297d1924d4e4a3ebdc3d45c9452fa7c 2024-12-08T00:20:13,536 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/430086fe322f40079827c6865d1ecd1c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/430086fe322f40079827c6865d1ecd1c 2024-12-08T00:20:13,537 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/955fb05a0bfb465e9efacb15027c909f 2024-12-08T00:20:13,539 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/c64489cc39a74e6bae270594b4659167 2024-12-08T00:20:13,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/52cf7090a8304833b67185b66b8fd5fc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/52cf7090a8304833b67185b66b8fd5fc 2024-12-08T00:20:13,541 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/94af88b184844c54a18e270d9922e48f 2024-12-08T00:20:13,542 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e3f9823138ff4b6dbdd8741edb5b9fdd 2024-12-08T00:20:13,545 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/3804da64924240e1b7b6039917e4e026 2024-12-08T00:20:13,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9096e6397f1d4a08ac404789f917a341 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/9096e6397f1d4a08ac404789f917a341 2024-12-08T00:20:13,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/4a3ccf1af1d84af9ace55738e18a9d63 2024-12-08T00:20:13,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/e555e56aada741a4ac6621043f7b9aa7 2024-12-08T00:20:13,565 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7ef52cd1e7e4f259459431eae90125e, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ee57da1dcc434ae5bc26b95a4e2bdeb2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/024fd7ca317f41b78c386ae470eb7aac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5638ff936e1c4a6199753c2cbb1b815c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fce2c86f35c495a8d98df092f937f32, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5e918bcb3fc249d8812417ddcc83aaa4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e9aaea17185e42e4bb389afd901d12b1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ff6e7b067a954e819c06700d201e2bf3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/201fec840e7a41c09f3ddcebeb6fd2e4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5000826884534879b553a1cb8b3fad47, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6730f7d1a5c3491badc1d08c402c0e2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/4195ec84c5b54f74ace2d4da36080fc7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a] to archive 2024-12-08T00:20:13,567 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:20:13,569 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/34263715e7514fa3a03d6d00b493a229 2024-12-08T00:20:13,570 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/25990c73463446d79af966e66908fda8 2024-12-08T00:20:13,572 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7ef52cd1e7e4f259459431eae90125e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7ef52cd1e7e4f259459431eae90125e 2024-12-08T00:20:13,573 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e7c38e344d2f44a0b1a280bb5532a5be 2024-12-08T00:20:13,575 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ef141dd317d34df090a18b52c92eb45b 2024-12-08T00:20:13,576 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8db4f0b2f6404cb08d3af10f6fd3c581 2024-12-08T00:20:13,577 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ee57da1dcc434ae5bc26b95a4e2bdeb2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ee57da1dcc434ae5bc26b95a4e2bdeb2 2024-12-08T00:20:13,578 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/80f1338eaa424c63a5d6d69c432f0583 2024-12-08T00:20:13,580 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e8cb97cd06d34b2bab3134bad0347626 2024-12-08T00:20:13,582 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/024fd7ca317f41b78c386ae470eb7aac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/024fd7ca317f41b78c386ae470eb7aac 2024-12-08T00:20:13,583 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/d43df4e6c79a4fefa00016178a8e2edd 2024-12-08T00:20:13,584 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e816c33803f04d6a8728460ae604ebfd 2024-12-08T00:20:13,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5638ff936e1c4a6199753c2cbb1b815c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5638ff936e1c4a6199753c2cbb1b815c 2024-12-08T00:20:13,587 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b517753406944e03ae20dd274b521c97 2024-12-08T00:20:13,588 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6d7edaba62404c239b64232790464ad3 2024-12-08T00:20:13,589 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/2c07171306f746f4a3ebf590c38d25ec 2024-12-08T00:20:13,590 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fce2c86f35c495a8d98df092f937f32 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fce2c86f35c495a8d98df092f937f32 2024-12-08T00:20:13,592 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/c65afcf7ebb6456c9b70028fd26b0bf1 2024-12-08T00:20:13,593 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3f1d76e9ef494fbebe5358e008a5bd34 2024-12-08T00:20:13,594 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5e918bcb3fc249d8812417ddcc83aaa4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5e918bcb3fc249d8812417ddcc83aaa4 2024-12-08T00:20:13,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bc4420dc7542420c9c42a493a7d2245a 2024-12-08T00:20:13,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/54323b62a2d9426383eff9ae52cf8310 2024-12-08T00:20:13,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e9aaea17185e42e4bb389afd901d12b1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/e9aaea17185e42e4bb389afd901d12b1 2024-12-08T00:20:13,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/3493666867664a77aa8e78ef535cd066 2024-12-08T00:20:13,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/8530760c6ecf43f085838dff0e2e304e 2024-12-08T00:20:13,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ff6e7b067a954e819c06700d201e2bf3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/ff6e7b067a954e819c06700d201e2bf3 2024-12-08T00:20:13,605 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/da5e01ef9ad74ce98741b1dc8f54cc7e 2024-12-08T00:20:13,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/efcca6f3d2074cb9ac2c28db5f5d7989 2024-12-08T00:20:13,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/201fec840e7a41c09f3ddcebeb6fd2e4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/201fec840e7a41c09f3ddcebeb6fd2e4 2024-12-08T00:20:13,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/bde716422687429d996a1469e0dc538a 2024-12-08T00:20:13,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/44a2cc0eae884d93906dba425efb5846 2024-12-08T00:20:13,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f10674e608904e2cbff33c3483ba8a43 2024-12-08T00:20:13,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5000826884534879b553a1cb8b3fad47 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/5000826884534879b553a1cb8b3fad47 2024-12-08T00:20:13,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b9af2301230c43ab9fde757001e2c137 2024-12-08T00:20:13,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b6bc62720ae84e78a7c9b62c73427edc 2024-12-08T00:20:13,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/52eae42e046c4a75b90199db3f8bb77a 2024-12-08T00:20:13,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6730f7d1a5c3491badc1d08c402c0e2b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6730f7d1a5c3491badc1d08c402c0e2b 2024-12-08T00:20:13,618 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/f7cb0d068a954bdd9b8241ddfeedfc17 2024-12-08T00:20:13,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/531aff85df194ab7ba3a171fb7020d05 2024-12-08T00:20:13,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/4195ec84c5b54f74ace2d4da36080fc7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/4195ec84c5b54f74ace2d4da36080fc7 2024-12-08T00:20:13,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/b172fdd37ad24c0c8627b95683df1af2 2024-12-08T00:20:13,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/cbca30b781ae4a7a880f87d71b39e663 2024-12-08T00:20:13,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/de8b5950ff6647b9aafea1b8a406479a 2024-12-08T00:20:13,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/03e467cdefc84c2bb49ccdf542c47dfe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b37427017efa4c43ab8eb81797cb59c4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f65b042ddb434df2a6bd18f246702af5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0b7f11a226c748769542081466054dbe, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/73b1c1bd3da54031b59f9aa5734d7f48, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/433891a92b18436091211bdd1d7f3c9b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6c1c4bd6a60547a9864c4515dc49baa9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3eeb460a8b6a4849afa19787848da2c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/028745e00a8c4d3b93d6ad4f179f36bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b34677252a3147b1bed3999ceaef7942, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/43b2105185dd43ccbd21d931abc68267, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dd9308f78d924a5ba5edba4a73892050, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3] to archive 2024-12-08T00:20:13,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:20:13,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/190177e2c3354f6e81321ef8501cc766 2024-12-08T00:20:13,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e83342bcdb7f40609d2ba7934abdf42a 2024-12-08T00:20:13,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/03e467cdefc84c2bb49ccdf542c47dfe to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/03e467cdefc84c2bb49ccdf542c47dfe 2024-12-08T00:20:13,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0e3841fdc2124902a987c8ded69b9812 2024-12-08T00:20:13,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6b4e2b668da54e6793ef2403aa431c92 2024-12-08T00:20:13,636 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/38b8a951fea14cd1a7f50f3e0861c9a6 2024-12-08T00:20:13,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b37427017efa4c43ab8eb81797cb59c4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b37427017efa4c43ab8eb81797cb59c4 2024-12-08T00:20:13,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4ed70870fd3741f6b7be77672e3a7661 2024-12-08T00:20:13,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/66b0803397924580960c1f80670039fe 2024-12-08T00:20:13,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f65b042ddb434df2a6bd18f246702af5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f65b042ddb434df2a6bd18f246702af5 2024-12-08T00:20:13,642 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9cc72c7b3a444e189f335f8dfd973e7d 2024-12-08T00:20:13,643 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/9bb8d29d4a7e4bdc81782d10323bde68 2024-12-08T00:20:13,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0b7f11a226c748769542081466054dbe to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0b7f11a226c748769542081466054dbe 2024-12-08T00:20:13,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/68286d3cfb694c90b6063d905b2c5e21 2024-12-08T00:20:13,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d1d4b8a11777466a941914cf59357873 2024-12-08T00:20:13,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6199ad78b6c9435990cdeb693b878c92 2024-12-08T00:20:13,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/73b1c1bd3da54031b59f9aa5734d7f48 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/73b1c1bd3da54031b59f9aa5734d7f48 2024-12-08T00:20:13,650 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/05574ef9617549da989183ef24de0bf8 2024-12-08T00:20:13,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/2856c69bad0b40388efa3f9f535536d7 2024-12-08T00:20:13,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/433891a92b18436091211bdd1d7f3c9b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/433891a92b18436091211bdd1d7f3c9b 2024-12-08T00:20:13,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/4c7581889ec34f7cbbb6c432e2cc434c 2024-12-08T00:20:13,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3e9c9457683a40aa9042f216885f8b13 2024-12-08T00:20:13,656 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6c1c4bd6a60547a9864c4515dc49baa9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/6c1c4bd6a60547a9864c4515dc49baa9 2024-12-08T00:20:13,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33ebae3e9de04ed6b8862f3015d1f62a 2024-12-08T00:20:13,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5fee7ba809d5448bb2a6fce7c8f8a9bd 2024-12-08T00:20:13,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3eeb460a8b6a4849afa19787848da2c6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/3eeb460a8b6a4849afa19787848da2c6 2024-12-08T00:20:13,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/ffab165b63f44afe8199ca47a303a6d6 2024-12-08T00:20:13,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5958e86bb12d42eb8f5019d032f1ade1 2024-12-08T00:20:13,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/33fd8157b04b41e4aac271a721f25bfd 2024-12-08T00:20:13,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/028745e00a8c4d3b93d6ad4f179f36bc to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/028745e00a8c4d3b93d6ad4f179f36bc 2024-12-08T00:20:13,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1604de5eb43c41bba57be8d23847ba67 2024-12-08T00:20:13,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dccfbc395d1b47ebbcd5e9353f505cb6 2024-12-08T00:20:13,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b34677252a3147b1bed3999ceaef7942 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/b34677252a3147b1bed3999ceaef7942 2024-12-08T00:20:13,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/e25ad532713b40819f1c846fe0e6d975 2024-12-08T00:20:13,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/460a97921ae54770b5ca84685b067a51 2024-12-08T00:20:13,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/908ab1a013ed4c8c9211d1ef1dbfdff8 2024-12-08T00:20:13,672 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/43b2105185dd43ccbd21d931abc68267 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/43b2105185dd43ccbd21d931abc68267 2024-12-08T00:20:13,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/aefaddc7b3f649ec822d47ebce71a491 2024-12-08T00:20:13,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/1cf7e5c858194411b4ec57eb9641163c 2024-12-08T00:20:13,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dd9308f78d924a5ba5edba4a73892050 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/dd9308f78d924a5ba5edba4a73892050 2024-12-08T00:20:13,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/5b6fee2eb08249928c46086e3cb18c01 2024-12-08T00:20:13,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/f0e3e6500b194014a13f9d5f56c3b305 2024-12-08T00:20:13,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/897b2018c0604e7c9a7814a18116a4f3 2024-12-08T00:20:13,684 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/recovered.edits/637.seqid, newMaxSeqId=637, maxSeqId=1 2024-12-08T00:20:13,687 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce. 2024-12-08T00:20:13,687 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for f51bdc360ee4fbe2f9447c9b6b4bf1ce: 2024-12-08T00:20:13,689 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:13,689 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=f51bdc360ee4fbe2f9447c9b6b4bf1ce, regionState=CLOSED 2024-12-08T00:20:13,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-08T00:20:13,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure f51bdc360ee4fbe2f9447c9b6b4bf1ce, server=017dd09fb407,36703,1733617179335 in 1.6220 sec 2024-12-08T00:20:13,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-08T00:20:13,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f51bdc360ee4fbe2f9447c9b6b4bf1ce, UNASSIGN in 1.6260 sec 2024-12-08T00:20:13,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-08T00:20:13,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6320 sec 2024-12-08T00:20:13,696 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617213696"}]},"ts":"1733617213696"} 2024-12-08T00:20:13,697 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:20:13,700 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:20:13,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6490 sec 2024-12-08T00:20:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T00:20:14,163 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 30 completed 2024-12-08T00:20:14,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T00:20:14,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,172 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,174 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T00:20:14,177 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:14,182 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/recovered.edits] 2024-12-08T00:20:14,186 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/65ed81ef152d46e9b84a7531a3679e41 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/65ed81ef152d46e9b84a7531a3679e41 2024-12-08T00:20:14,188 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8a63aac20cd846dd9c93b520e435858c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/8a63aac20cd846dd9c93b520e435858c 2024-12-08T00:20:14,190 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/b5433650ada84ed8af920c746ec1d0ac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/A/b5433650ada84ed8af920c746ec1d0ac 2024-12-08T00:20:14,193 DEBUG 
[HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fdafc79d97d42d4b9915e960fb49d0f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/6fdafc79d97d42d4b9915e960fb49d0f 2024-12-08T00:20:14,194 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/9402fe6c86c9431dbee59e7bf4c6c8f3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/9402fe6c86c9431dbee59e7bf4c6c8f3 2024-12-08T00:20:14,196 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/946218c86d2b40b88760838acc32f4e7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/B/946218c86d2b40b88760838acc32f4e7 2024-12-08T00:20:14,199 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0ba7c3e255c74cc38c393b8fc8fdcd20 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/0ba7c3e255c74cc38c393b8fc8fdcd20 2024-12-08T00:20:14,201 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/7d13e8bef5a64c3da0137c93fb7bd954 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/7d13e8bef5a64c3da0137c93fb7bd954 2024-12-08T00:20:14,203 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d881dd8234104bc0beffe893754d3f86 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/C/d881dd8234104bc0beffe893754d3f86 2024-12-08T00:20:14,207 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/recovered.edits/637.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce/recovered.edits/637.seqid 2024-12-08T00:20:14,207 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/f51bdc360ee4fbe2f9447c9b6b4bf1ce 2024-12-08T00:20:14,207 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:20:14,213 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-08T00:20:14,222 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:20:14,254 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T00:20:14,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:20:14,256 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617214256"}]},"ts":"9223372036854775807"} 2024-12-08T00:20:14,259 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:20:14,259 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f51bdc360ee4fbe2f9447c9b6b4bf1ce, NAME => 'TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:20:14,260 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
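[editor's note] The records around this point show the test tearing down TestAcidGuarantees through the Admin API: the DISABLE operation completes (DisableTableProcedure, pid=30), the client then requests a delete (DeleteTableProcedure, pid=34), HFileArchiver moves the region's store files from data/ into archive/, and the table's rows and descriptor are removed from hbase:meta. As an illustrative, hedged sketch only (not part of this test run), the equivalent client-side calls would look roughly like the following Java; the class name DropTestTable and the standalone main method are hypothetical, and the snippet assumes an hbase-site.xml on the classpath pointing at a reachable cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        // Loads hbase-site.xml from the classpath; assumes the (hypothetical) target cluster is reachable.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                // DISABLE first (DisableTableProcedure on the master), then DELETE
                // (DeleteTableProcedure), mirroring the procedure pids logged above.
                if (!admin.isTableDisabled(table)) {
                    admin.disableTable(table);
                }
                admin.deleteTable(table);
            }
        }
    }
}

On the server side, the delete is what triggers the HFileArchiver lines that follow: region store files are not removed in place but relocated under the archive/ tree before the region directory itself is deleted.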
2024-12-08T00:20:14,260 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617214260"}]},"ts":"9223372036854775807"} 2024-12-08T00:20:14,263 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:20:14,265 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 98 msec 2024-12-08T00:20:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T00:20:14,275 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-08T00:20:14,290 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS:0;017dd09fb407:36703-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/017dd09fb407:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1079539836_22 at /127.0.0.1:34444 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=392 (was 219) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7837 (was 8951) 2024-12-08T00:20:14,301 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=392, ProcessCount=11, AvailableMemoryMB=7836 2024-12-08T00:20:14,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T00:20:14,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:20:14,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:14,306 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:20:14,306 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:14,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-12-08T00:20:14,307 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:20:14,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-08T00:20:14,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741977_1153 (size=963) 2024-12-08T00:20:14,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=35 2024-12-08T00:20:14,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-08T00:20:14,716 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:20:14,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741978_1154 (size=53) 2024-12-08T00:20:14,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing fabe935a14e4a2f5a6e3e15c47ba0977, disabling compactions & flushes 2024-12-08T00:20:15,123 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. after waiting 0 ms 2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:15,123 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:15,123 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:15,125 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:20:15,125 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617215125"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617215125"}]},"ts":"1733617215125"} 2024-12-08T00:20:15,127 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:20:15,127 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:20:15,128 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617215127"}]},"ts":"1733617215127"} 2024-12-08T00:20:15,129 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:20:15,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, ASSIGN}] 2024-12-08T00:20:15,134 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, ASSIGN 2024-12-08T00:20:15,135 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:20:15,285 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:15,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-08T00:20:15,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:15,442 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
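For reference, the create-table request logged above at 00:20:14,303 (ADAPTIVE in-memory compaction, families A/B/C with a single version) roughly corresponds to the Admin-API call sketched below. This is a minimal, hedged sketch, not the test's actual code: it assumes an already-open Connection named conn, HBase 2.x client classes, and that the 131072-byte flush size the MEMSTORE_FLUSHSIZE warning refers to was set at the table level (the warning says it may equally come from "hbase.hregion.memstore.flush.size" configuration).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            // Table-level attributes seen in the logged descriptor.
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                    .setMemStoreFlushSize(131072L); // 128 KB; assumption, see warning above
            // Families A, B, C all use VERSIONS => '1' in the logged descriptor.
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)
                        .build());
            }
            admin.createTable(builder.build()); // corresponds to the CreateTableProcedure (pid=35)
        }
    }
}
```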
2024-12-08T00:20:15,443 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:20:15,443 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,443 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:20:15,443 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,443 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,445 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,446 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:15,447 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName A 2024-12-08T00:20:15,447 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:15,448 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:15,448 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,450 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:15,450 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName B 2024-12-08T00:20:15,451 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:15,451 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:15,452 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,453 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:15,453 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName C 2024-12-08T00:20:15,453 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:15,454 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:15,454 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:15,455 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,455 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,457 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:20:15,458 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:15,460 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:20:15,461 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened fabe935a14e4a2f5a6e3e15c47ba0977; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66539297, jitterRate=-0.008487209677696228}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:20:15,462 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:15,462 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., pid=37, masterSystemTime=1733617215439 2024-12-08T00:20:15,464 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:15,464 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
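A hedged reading of the split-policy figures in the open entry above: desiredMaxFileSize looks like a base region max file size with the printed jitterRate applied, since 67108864 × (1 − 0.008487209677696228) ≈ 66539297. The 64 MB (67108864-byte) base is an assumption, not something stated in the log; the snippet below only reproduces that arithmetic.

```java
// Illustrative arithmetic only; the 64 MB base split size is assumed, not logged.
public class SplitSizeJitterSketch {
    public static void main(String[] args) {
        long assumedBaseMaxFileSize = 64L * 1024 * 1024;   // 67108864 bytes (assumption)
        double jitterRate = -0.008487209677696228;         // value printed in the log
        long desired = (long) (assumedBaseMaxFileSize * (1.0 + jitterRate));
        System.out.println(desired);                       // prints 66539297, matching the log
    }
}
```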
2024-12-08T00:20:15,465 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:15,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-08T00:20:15,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 in 179 msec 2024-12-08T00:20:15,469 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-08T00:20:15,469 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, ASSIGN in 335 msec 2024-12-08T00:20:15,470 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:20:15,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617215470"}]},"ts":"1733617215470"} 2024-12-08T00:20:15,471 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:20:15,474 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:20:15,475 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1710 sec 2024-12-08T00:20:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-08T00:20:16,413 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-08T00:20:16,414 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a9b9802 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@118b007e 2024-12-08T00:20:16,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d29de25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:16,420 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:16,422 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:16,424 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:20:16,425 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59114, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:20:16,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T00:20:16,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:20:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:16,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741979_1155 (size=999) 2024-12-08T00:20:16,851 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T00:20:16,851 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T00:20:16,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:20:16,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, REOPEN/MOVE}] 2024-12-08T00:20:16,864 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, REOPEN/MOVE 2024-12-08T00:20:16,865 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:16,866 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:20:16,866 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:17,018 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,019 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:20:17,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing fabe935a14e4a2f5a6e3e15c47ba0977, disabling compactions & flushes 2024-12-08T00:20:17,019 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. after waiting 0 ms 2024-12-08T00:20:17,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:17,023 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T00:20:17,023 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,024 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:17,024 WARN [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: fabe935a14e4a2f5a6e3e15c47ba0977 to self. 2024-12-08T00:20:17,025 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,026 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=CLOSED 2024-12-08T00:20:17,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-08T00:20:17,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 in 161 msec 2024-12-08T00:20:17,029 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, REOPEN/MOVE; state=CLOSED, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=true 2024-12-08T00:20:17,179 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:17,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,336 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:17,336 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:20:17,336 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,336 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:20:17,336 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,336 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,339 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,339 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:17,345 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName A 2024-12-08T00:20:17,346 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:17,347 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:17,347 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,348 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:17,348 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName B 2024-12-08T00:20:17,348 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:17,349 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:17,349 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,349 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:17,349 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fabe935a14e4a2f5a6e3e15c47ba0977 columnFamilyName C 2024-12-08T00:20:17,349 DEBUG [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:17,350 INFO [StoreOpener-fabe935a14e4a2f5a6e3e15c47ba0977-1 {}] regionserver.HStore(327): Store=fabe935a14e4a2f5a6e3e15c47ba0977/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:17,350 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,351 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,351 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,353 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:20:17,354 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,355 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened fabe935a14e4a2f5a6e3e15c47ba0977; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73464263, jitterRate=0.0947028249502182}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:20:17,357 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:17,358 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., pid=42, masterSystemTime=1733617217332 2024-12-08T00:20:17,359 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,359 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
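The modify-table request logged at 00:20:16,432 (enabling MOB on family 'A' with MOB_THRESHOLD => '4') and the close/reopen of region fabe935a14e4a2f5a6e3e15c47ba0977 that follows roughly correspond to the Admin-API sequence sketched below. Again a hedged sketch rather than the test's actual code: it assumes HBase 2.x client classes and an open Connection named conn.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            TableDescriptor current = admin.getDescriptor(table);
            // Rebuild family 'A' with the MOB settings from the logged target descriptor.
            ColumnFamilyDescriptor familyA = current.getColumnFamily(Bytes.toBytes("A"));
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(familyA)
                .setMobEnabled(true)     // IS_MOB => 'true'
                .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
                .build();
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(mobA)
                .build();
            // Triggers a ModifyTableProcedure and a ReopenTableRegionsProcedure,
            // as seen in the pid=38/39/40 entries above.
            admin.modifyTable(modified);
        }
    }
}
```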
2024-12-08T00:20:17,360 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=OPEN, openSeqNum=5, regionLocation=017dd09fb407,36703,1733617179335
2024-12-08T00:20:17,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40
2024-12-08T00:20:17,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 in 180 msec
2024-12-08T00:20:17,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39
2024-12-08T00:20:17,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, REOPEN/MOVE in 499 msec
2024-12-08T00:20:17,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38
2024-12-08T00:20:17,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec
2024-12-08T00:20:17,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 933 msec
2024-12-08T00:20:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-12-08T00:20:17,377 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672
2024-12-08T00:20:17,382 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,383 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba
2024-12-08T00:20:17,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,389 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431
2024-12-08T00:20:17,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,393 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd
2024-12-08T00:20:17,396 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,397 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7
2024-12-08T00:20:17,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,400 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df
2024-12-08T00:20:17,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e
2024-12-08T00:20:17,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,407 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9
2024-12-08T00:20:17,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,411 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58
2024-12-08T00:20:17,413 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T00:20:17,416 INFO
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:17,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-08T00:20:17,418 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:17,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:17,418 DEBUG [hconnection-0x25c0120c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,419 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:17,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:17,420 DEBUG [hconnection-0x82b64cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,422 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,425 DEBUG [hconnection-0x4898072e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,426 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,433 DEBUG [hconnection-0x68100ae9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,434 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:17,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:17,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:17,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:17,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-08T00:20:17,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:17,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:17,436 DEBUG [hconnection-0x46fd7854-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,437 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,440 DEBUG [hconnection-0x6f5faef9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,441 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,441 DEBUG [hconnection-0x5dc972f3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,441 DEBUG [hconnection-0x2a2a8c25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,442 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,442 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,444 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,456 DEBUG [hconnection-0x29879a38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:17,457 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:17,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617277464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617277464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617277464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617277466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617277468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412088569cc7366ca41119564072ce3cf87b6_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617217428/Put/seqid=0 2024-12-08T00:20:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:17,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741980_1156 (size=12154) 2024-12-08T00:20:17,543 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:17,549 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412088569cc7366ca41119564072ce3cf87b6_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088569cc7366ca41119564072ce3cf87b6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,552 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0bc9bbd7f430460db7a7df82eacc89fa, store: [table=TestAcidGuarantees 
family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:17,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0bc9bbd7f430460db7a7df82eacc89fa is 175, key is test_row_0/A:col10/1733617217428/Put/seqid=0 2024-12-08T00:20:17,571 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:17,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:17,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617277571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617277573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617277571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617277573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617277570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741981_1157 (size=30955) 2024-12-08T00:20:17,624 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0bc9bbd7f430460db7a7df82eacc89fa 2024-12-08T00:20:17,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/1720f9f29927490990f0a4a7121874bb is 50, key is test_row_0/B:col10/1733617217428/Put/seqid=0 2024-12-08T00:20:17,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741982_1158 (size=12001) 2024-12-08T00:20:17,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/1720f9f29927490990f0a4a7121874bb 2024-12-08T00:20:17,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b0b761eadb0848c281ecf071b26cc8be is 50, key is test_row_0/C:col10/1733617217428/Put/seqid=0 2024-12-08T00:20:17,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:17,726 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:17,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
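
The FlushTableProcedure (pid=43) and its FlushRegionProcedure child (pid=44) being dispatched and re-dispatched above were started by the client-side "flush TestAcidGuarantees" request logged earlier, and the repeated "Checking to see if procedure is done pid=43" lines appear to be the master answering the client's polls for completion. A minimal sketch, assuming a standard client Connection, of issuing that kind of administrative flush:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Admin.flush asks the master to flush every region of the table; in this log
    // that request shows up as FlushTableProcedure pid=43 fanning out pid=44.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
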
2024-12-08T00:20:17,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:17,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
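
The FlushRegionCallable keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still working through the region, while the writers' puts are being rejected with RegionTooBusyException ("Over memstore limit=512.0 K"). A minimal sketch of one way a caller might back off and retry such a blocked put; the helper, the table handle, and the retry bounds are hypothetical, not taken from the test:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPutSketch {
  // Hypothetical helper: retry a single Put with exponential backoff while the
  // region's memstore is over its blocking limit and rejecting new writes.
  static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long sleepMs = 50;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(sleepMs);                  // wait for the flush to catch up
          sleepMs = Math.min(sleepMs * 2, 2_000); // cap the backoff
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}
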
2024-12-08T00:20:17,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
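
The threshold behind these repeated RegionTooBusyException warnings is the region's memstore blocking limit, reported here as 512.0 K; that value is unusually small, presumably because this test runs with a reduced flush size, and in a stock deployment the limit is normally derived from the configured flush size and block multiplier. A minimal sketch, under that assumption, of reading the two standard settings involved; the values shown are illustrative defaults, not the test's values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Standard HBase keys; the product of the two approximates the per-region
    // memstore size at which new writes start failing with RegionTooBusyException.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block when a region's memstore exceeds ~"
        + (flushSize * blockMultiplier) + " bytes");
  }
}
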
2024-12-08T00:20:17,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741983_1159 (size=12001) 2024-12-08T00:20:17,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b0b761eadb0848c281ecf071b26cc8be 2024-12-08T00:20:17,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0bc9bbd7f430460db7a7df82eacc89fa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa 2024-12-08T00:20:17,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa, entries=150, sequenceid=15, filesize=30.2 K 2024-12-08T00:20:17,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/1720f9f29927490990f0a4a7121874bb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb 2024-12-08T00:20:17,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T00:20:17,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b0b761eadb0848c281ecf071b26cc8be as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be 2024-12-08T00:20:17,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T00:20:17,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for fabe935a14e4a2f5a6e3e15c47ba0977 in 342ms, sequenceid=15, compaction requested=false 2024-12-08T00:20:17,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:17,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,791 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:17,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:17,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082d94610f22114bb1b3301cbef909771e_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617217787/Put/seqid=0 2024-12-08T00:20:17,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617277803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617277811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617277814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617277819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617277820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741984_1160 (size=12154) 2024-12-08T00:20:17,841 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:17,846 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082d94610f22114bb1b3301cbef909771e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082d94610f22114bb1b3301cbef909771e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:17,848 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/ebc5a73dfac04f1991f1efab1d486dd1, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:17,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/ebc5a73dfac04f1991f1efab1d486dd1 is 175, key is test_row_0/A:col10/1733617217787/Put/seqid=0 2024-12-08T00:20:17,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741985_1161 (size=30955) 2024-12-08T00:20:17,861 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=55.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/ebc5a73dfac04f1991f1efab1d486dd1 2024-12-08T00:20:17,880 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:17,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:17,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:17,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:17,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aa96f1918877443b81fd374eb3817e65 is 50, key is test_row_0/B:col10/1733617217787/Put/seqid=0 2024-12-08T00:20:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741986_1162 (size=12001) 2024-12-08T00:20:17,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617277923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617277923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617277924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617277931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:17,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:17,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617277932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:18,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
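Note on the records above, not part of the captured log: the repeated RegionTooBusyException warnings are HRegion.checkResources rejecting Mutate RPCs while region fabe935a14e4a2f5a6e3e15c47ba0977 sits over its 512.0 K blocking memstore size and the in-flight flush has not yet drained it. A minimal client-side sketch of riding out that condition is shown below, assuming only the standard hbase-client API; the row key, cell value, attempt budget and backoff numbers are illustrative assumptions, not values taken from this run.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // rejected while the region is over its blocking memstore size
          break;
        } catch (IOException busy) {
          // In this scenario the failure is a RegionTooBusyException, possibly wrapped by
          // the client's own retry machinery; back off and let the flush catch up.
          if (attempt == 5) {
            throw busy;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2; // simple exponential backoff
        }
      }
    }
  }
}

In practice the stock client already retries RegionTooBusyException internally, so explicit handling like this mostly matters when retry settings are tightened, as they typically are in tests.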
2024-12-08T00:20:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617278130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617278131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617278132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617278137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617278137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,196 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
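The 512.0 K figure in those exceptions is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the limit here is far below the 128 MB default flush size, so this run evidently lowers it to make blocking easy to trigger. The sketch below only illustrates that arithmetic; the 128 KB flush size and 4x multiplier are assumptions that happen to reproduce the logged 512 K limit, not settings read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: flush at 128 KB and block writers at 4x the flush size.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // checkResources rejects writes with RegionTooBusyException("Over memstore limit=...")
    // once the region's memstore grows past flushSize * multiplier.
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes"); // 524288
  }
}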
2024-12-08T00:20:18,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aa96f1918877443b81fd374eb3817e65 2024-12-08T00:20:18,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/10425cae7b3641b982e91cea8bde1dad is 50, key is test_row_0/C:col10/1733617217787/Put/seqid=0 2024-12-08T00:20:18,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
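Interleaved with the write pressure, the master keeps dispatching the pid=44 flush procedure to 017dd09fb407,36703, and each FlushRegionCallable attempt ends in "Unable to complete flush ... as already flushing" because the memstore-pressure flush is still running; the procedure framework simply re-dispatches until an attempt succeeds. The sketch below shows how such a table flush is typically requested from client code with the standard Admin API; whether this particular run issued it through Admin.flush or through test-harness helpers is not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks HBase to flush every region of the table. In this 2.7.0-SNAPSHOT build the
      // master drives that through a procedure which dispatches FlushRegionCallable to
      // the region servers and retries region-level failures such as "already flushing"
      // (cf. pid=44 above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}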
2024-12-08T00:20:18,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741987_1163 (size=12001) 2024-12-08T00:20:18,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/10425cae7b3641b982e91cea8bde1dad 2024-12-08T00:20:18,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/ebc5a73dfac04f1991f1efab1d486dd1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1 2024-12-08T00:20:18,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1, entries=150, sequenceid=43, filesize=30.2 K 2024-12-08T00:20:18,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aa96f1918877443b81fd374eb3817e65 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65 2024-12-08T00:20:18,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65, entries=150, sequenceid=43, filesize=11.7 K 2024-12-08T00:20:18,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/10425cae7b3641b982e91cea8bde1dad as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad 2024-12-08T00:20:18,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad, entries=150, sequenceid=43, filesize=11.7 K 2024-12-08T00:20:18,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for fabe935a14e4a2f5a6e3e15c47ba0977 in 623ms, sequenceid=43, compaction requested=false 2024-12-08T00:20:18,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:18,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:18,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:18,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:18,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080073f921b3cc467ebc74e7138d2e7ec5_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:18,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741988_1164 (size=12154) 2024-12-08T00:20:18,481 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:18,487 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080073f921b3cc467ebc74e7138d2e7ec5_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080073f921b3cc467ebc74e7138d2e7ec5_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:18,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/a3073b49c56c435a8625ee96bf1c2b61, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:18,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/a3073b49c56c435a8625ee96bf1c2b61 is 175, key is test_row_0/A:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:18,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741989_1165 (size=30955) 2024-12-08T00:20:18,500 INFO 
[MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/a3073b49c56c435a8625ee96bf1c2b61 2024-12-08T00:20:18,503 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:18,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/679caf93e3b2442a95a0675ba823ca87 is 50, key is test_row_0/B:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741990_1166 (size=12001) 2024-12-08T00:20:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:18,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617278560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617278563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617278566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617278566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617278569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,656 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:18,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617278671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617278678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617278680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617278680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617278681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,810 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,811 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:18,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617278876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617278883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617278884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617278885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617278885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/679caf93e3b2442a95a0675ba823ca87 2024-12-08T00:20:18,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9e87a88e1a7b4cd8bdf83b1ff1171345 is 50, key is test_row_0/C:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:18,964 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:18,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:18,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:18,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:18,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:18,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741991_1167 (size=12001) 2024-12-08T00:20:18,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9e87a88e1a7b4cd8bdf83b1ff1171345 2024-12-08T00:20:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/a3073b49c56c435a8625ee96bf1c2b61 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61 2024-12-08T00:20:18,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61, entries=150, sequenceid=55, filesize=30.2 K 2024-12-08T00:20:18,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/679caf93e3b2442a95a0675ba823ca87 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87 2024-12-08T00:20:18,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87, entries=150, sequenceid=55, filesize=11.7 K 2024-12-08T00:20:18,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9e87a88e1a7b4cd8bdf83b1ff1171345 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345 2024-12-08T00:20:19,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345, entries=150, sequenceid=55, filesize=11.7 K 2024-12-08T00:20:19,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fabe935a14e4a2f5a6e3e15c47ba0977 in 556ms, sequenceid=55, compaction requested=true 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:19,005 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:19,005 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:19,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:19,006 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:19,006 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:19,006 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:19,006 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:19,006 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,006 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,007 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=90.7 K 2024-12-08T00:20:19,007 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.2 K 2024-12-08T00:20:19,007 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,007 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61] 2024-12-08T00:20:19,007 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1720f9f29927490990f0a4a7121874bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617217428 2024-12-08T00:20:19,008 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc9bbd7f430460db7a7df82eacc89fa, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617217428 2024-12-08T00:20:19,008 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting aa96f1918877443b81fd374eb3817e65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733617217786 2024-12-08T00:20:19,009 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebc5a73dfac04f1991f1efab1d486dd1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733617217786 2024-12-08T00:20:19,010 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 679caf93e3b2442a95a0675ba823ca87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:19,011 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3073b49c56c435a8625ee96bf1c2b61, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:19,025 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,026 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:19,027 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/e95cb016f406476d8dcb8902e982067b is 50, key is test_row_0/B:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:19,033 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412084218d4f550bd4f66952c75ae7423c9bc_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741992_1168 (size=12104) 2024-12-08T00:20:19,056 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412084218d4f550bd4f66952c75ae7423c9bc_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,056 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084218d4f550bd4f66952c75ae7423c9bc_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,067 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/e95cb016f406476d8dcb8902e982067b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e95cb016f406476d8dcb8902e982067b 2024-12-08T00:20:19,073 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into e95cb016f406476d8dcb8902e982067b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
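[Editor's note: the entries above show the region server selecting a minor compaction of family B on its own after the flush and throttling it through the PressureAwareThroughputController. As a hedged illustration only (not code from this test run; the table and family names simply mirror the ones in the log), the same kind of compaction can also be requested explicitly through the HBase client Admin API:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Ask for a minor compaction of family B, analogous to the system-requested one logged above.
                admin.compact(table, Bytes.toBytes("B"));
                // A major compaction of the whole table can be requested the same way.
                admin.majorCompact(table);
            }
        }
    }

[Both calls are asynchronous requests; the server schedules the work on the same short/long compaction queues visible in these log lines.]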
2024-12-08T00:20:19,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:19,073 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617219005; duration=0sec 2024-12-08T00:20:19,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:19,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:19,074 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:19,075 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:19,075 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:19,075 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,075 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.2 K 2024-12-08T00:20:19,076 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b0b761eadb0848c281ecf071b26cc8be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617217428 2024-12-08T00:20:19,076 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 10425cae7b3641b982e91cea8bde1dad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733617217786 2024-12-08T00:20:19,077 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e87a88e1a7b4cd8bdf83b1ff1171345, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:19,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is 
added to blk_1073741993_1169 (size=4469) 2024-12-08T00:20:19,081 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#148 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:19,084 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/45622afb47a6460d97e22e5418e98f12 is 175, key is test_row_0/A:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:19,108 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#149 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:19,109 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/db488a4262bc4def84e962940fc3a512 is 50, key is test_row_0/C:col10/1733617218445/Put/seqid=0 2024-12-08T00:20:19,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
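[Editor's note: the FlushRegionCallable entry above is the region-server side of a client-requested flush (FlushRegionProcedure pid=44, completed further down via client.HBaseAdmin). A minimal, hedged sketch of how such a flush is requested from the client side, assuming a reachable cluster and the HBase client on the classpath:]

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin()) {
                // Flush all column families of the table; the master turns this into the
                // FlushTableProcedure / FlushRegionProcedure steps seen in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }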
2024-12-08T00:20:19,120 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:19,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741994_1170 (size=31058) 2024-12-08T00:20:19,143 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/45622afb47a6460d97e22e5418e98f12 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12 2024-12-08T00:20:19,151 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 45622afb47a6460d97e22e5418e98f12(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
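[Editor's note: the CompactingMemStore "FLUSHING TO DISK" and "Swapping pipeline suffix" entries above come from in-memory compaction being enabled on these column families. As an illustrative sketch only (the table name and policy choice below are assumptions, not taken from this test's setup code), a family can be created with that behaviour like this:]

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableWithInMemoryCompaction {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin()) {
                // BASIC keeps a pipeline of immutable in-memory segments, which is what produces the
                // CompactionPipeline log lines seen here; ADAPTIVE and EAGER are the other policies.
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("ExampleTable"))   // hypothetical table name
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("A"))
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build())
                    .build());
            }
        }
    }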
2024-12-08T00:20:19,151 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:19,151 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617219005; duration=0sec 2024-12-08T00:20:19,151 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:19,151 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:19,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085b7340e4b6734a9da9c5831aecc4158e_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617218566/Put/seqid=0 2024-12-08T00:20:19,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741995_1171 (size=12104) 2024-12-08T00:20:19,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:19,185 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/db488a4262bc4def84e962940fc3a512 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/db488a4262bc4def84e962940fc3a512 2024-12-08T00:20:19,193 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into db488a4262bc4def84e962940fc3a512(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
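[Editor's note: the "Over memstore limit=512.0 K" RegionTooBusyException warnings that follow are the region refusing new writes once its memstore exceeds the blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests this test deliberately uses a very small flush size. A hedged sketch of how those two settings are expressed in configuration, with illustrative values (the stock defaults), not the ones this test uses:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Size at which a memstore is flushed to disk (stock default 128 MB); illustrative value.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Writes are rejected with RegionTooBusyException once the memstore grows past
            // flush.size * block.multiplier (stock default multiplier is 4); illustrative value.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("memstore blocking limit = " + limit + " bytes");
        }
    }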
2024-12-08T00:20:19,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:19,193 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617219005; duration=0sec 2024-12-08T00:20:19,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:19,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:19,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741996_1172 (size=12154) 2024-12-08T00:20:19,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:19,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617279197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617279199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617279199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,210 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085b7340e4b6734a9da9c5831aecc4158e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085b7340e4b6734a9da9c5831aecc4158e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:19,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617279205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617279207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/46bd0440d10e4de0b2b37846157090d7, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/46bd0440d10e4de0b2b37846157090d7 is 175, key is test_row_0/A:col10/1733617218566/Put/seqid=0 2024-12-08T00:20:19,248 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:20:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741997_1173 (size=30955) 2024-12-08T00:20:19,255 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/46bd0440d10e4de0b2b37846157090d7 2024-12-08T00:20:19,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/7650cd9428344cf2a510b77746d2e718 is 50, key is test_row_0/B:col10/1733617218566/Put/seqid=0 2024-12-08T00:20:19,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617279309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617279309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617279310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617279312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617279313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741998_1174 (size=12001) 2024-12-08T00:20:19,322 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/7650cd9428344cf2a510b77746d2e718 2024-12-08T00:20:19,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/84b92731a1df4e2bb5399c3698cd3d3d is 50, key is test_row_0/C:col10/1733617218566/Put/seqid=0 2024-12-08T00:20:19,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741999_1175 (size=12001) 2024-12-08T00:20:19,355 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/84b92731a1df4e2bb5399c3698cd3d3d 2024-12-08T00:20:19,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/46bd0440d10e4de0b2b37846157090d7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7 2024-12-08T00:20:19,370 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7, entries=150, sequenceid=80, filesize=30.2 K 2024-12-08T00:20:19,372 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/7650cd9428344cf2a510b77746d2e718 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718 2024-12-08T00:20:19,379 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718, entries=150, sequenceid=80, filesize=11.7 K 2024-12-08T00:20:19,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/84b92731a1df4e2bb5399c3698cd3d3d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d 2024-12-08T00:20:19,389 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d, entries=150, sequenceid=80, filesize=11.7 K 2024-12-08T00:20:19,391 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fabe935a14e4a2f5a6e3e15c47ba0977 in 271ms, sequenceid=80, compaction requested=false 2024-12-08T00:20:19,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:19,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
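[Editor's note: the RegionTooBusyException stack traces throughout this log are returned to the writing clients, which are expected to back off and retry; the stock HBase client normally does this on its own within its retry budget. As a minimal, hedged sketch only (hypothetical row and value contents; relevant if the client's own retries are disabled or exhausted), an explicit retry loop around a put could look like this:]

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                       // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) {
                            throw e;                 // give up after a bounded number of attempts
                        }
                        Thread.sleep(backoffMs);     // back off while the flush catches up
                        backoffMs *= 2;
                    }
                }
            }
        }
    }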
2024-12-08T00:20:19,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-08T00:20:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-08T00:20:19,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-08T00:20:19,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9730 sec 2024-12-08T00:20:19,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.9790 sec 2024-12-08T00:20:19,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:19,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:19,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:19,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-08T00:20:19,524 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-08T00:20:19,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:19,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-08T00:20:19,532 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:19,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:19,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, 
id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:19,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:19,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cab995beabad40b3956189754515038a_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:19,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617279549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617279549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617279554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617279555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617279556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742000_1176 (size=14594) 2024-12-08T00:20:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:19,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617279659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617279659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617279661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617279662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617279662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-08T00:20:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:19,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:19,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:19,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-08T00:20:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:19,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617279863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
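The run of RegionTooBusyException warnings above is HRegion.checkResources applying memstore back-pressure: once the region's memstore exceeds its blocking limit (reported here as 512.0 K), new mutations are rejected until the in-flight flush completes. The sketch below is a hypothetical client-side view of such a write. The table, row, family and qualifier are taken from the log; the configuration values (chosen so that flush size * block multiplier comes to 512 K) and the retry/backoff loop are illustrative assumptions, not the test's actual settings.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed, illustrative values: a 128 K flush size with a multiplier of 4
    // would yield the 512.0 K blocking limit reported in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;  // write accepted once the flush has freed memstore space
        } catch (IOException busy) {
          // A blocked region surfaces here, e.g. as the RegionTooBusyException
          // seen in the log (possibly wrapped by the client's own retry machinery).
          Thread.sleep(100L * (attempt + 1));  // simple backoff while the flush catches up
        }
      }
    }
  }
}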
2024-12-08T00:20:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617279863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617279868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617279868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:19,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617279872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:19,983 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:19,992 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cab995beabad40b3956189754515038a_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cab995beabad40b3956189754515038a_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:19,993 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/c544c04ef7374d18b6dc5ad2b401233f, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:19,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/c544c04ef7374d18b6dc5ad2b401233f is 175, key is test_row_0/A:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,017 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-08T00:20:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:20,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:20,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742001_1177 (size=39549) 2024-12-08T00:20:20,029 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/c544c04ef7374d18b6dc5ad2b401233f 2024-12-08T00:20:20,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aed3ff48cddd4332aeb73d8a9e8ffe9a is 50, key is test_row_0/B:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742002_1178 (size=12001) 2024-12-08T00:20:20,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aed3ff48cddd4332aeb73d8a9e8ffe9a 2024-12-08T00:20:20,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9dfad7e4b1424f88a9c18038f1a7024a is 50, key is test_row_0/C:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,112 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742003_1179 (size=12001) 2024-12-08T00:20:20,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9dfad7e4b1424f88a9c18038f1a7024a 2024-12-08T00:20:20,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/c544c04ef7374d18b6dc5ad2b401233f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f 2024-12-08T00:20:20,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f, entries=200, sequenceid=95, filesize=38.6 K 2024-12-08T00:20:20,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/aed3ff48cddd4332aeb73d8a9e8ffe9a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a 2024-12-08T00:20:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:20,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T00:20:20,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9dfad7e4b1424f88a9c18038f1a7024a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a 2024-12-08T00:20:20,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T00:20:20,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fabe935a14e4a2f5a6e3e15c47ba0977 in 635ms, sequenceid=95, compaction requested=true 2024-12-08T00:20:20,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:20,153 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:20,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:20,155 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:20,155 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:20,155 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:20,155 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=99.2 K 2024-12-08T00:20:20,155 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:20,155 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f] 2024-12-08T00:20:20,156 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45622afb47a6460d97e22e5418e98f12, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:20,157 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46bd0440d10e4de0b2b37846157090d7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733617218490 2024-12-08T00:20:20,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:20,157 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c544c04ef7374d18b6dc5ad2b401233f, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219197 2024-12-08T00:20:20,157 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:20,159 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:20,159 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:20,159 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
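The compaction lines above show the selection step: with three store files eligible (and sixteen as the blocking count), ExploringCompactionPolicy picks all three for a minor compaction of store A and then store B. For reference only, a compaction can also be requested explicitly through the Admin API; the sketch below is illustrative, since the compactions in this log are system-requested by MemStoreFlusher rather than issued by a client.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a compaction for every store of the table; the server still
      // applies its own selection policy, as in the log above.
      admin.compact(table);
      // Or force a major compaction of a single family, here store A.
      admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}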
2024-12-08T00:20:20,159 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e95cb016f406476d8dcb8902e982067b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.3 K 2024-12-08T00:20:20,160 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e95cb016f406476d8dcb8902e982067b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:20,161 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7650cd9428344cf2a510b77746d2e718, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733617218490 2024-12-08T00:20:20,162 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting aed3ff48cddd4332aeb73d8a9e8ffe9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219203 2024-12-08T00:20:20,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:20,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:20,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:20,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:20,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-08T00:20:20,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
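Family A in this test is MOB-enabled, which is why its flushes and compactions go through HMobStore, DefaultMobStoreFlusher and DefaultMobStoreCompactor and why the flushed cells are renamed under the mobdir path seen earlier. The following is a hypothetical sketch of declaring such a schema through the client API; the MOB threshold and the plain B/C families are assumptions for illustration, not values read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Hypothetical layout: family A keeps values above the MOB threshold in
      // the mob area (the mobdir paths in the log); B and C are ordinary families.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(4L)  // bytes; illustrative value only
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}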
2024-12-08T00:20:20,172 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:20:20,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:20,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:20,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:20,173 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:20,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:20,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617280185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617280184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617280190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,195 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#157 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:20,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,195 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/94c955e544874371aecf6c9ed5d905cb is 50, key is test_row_0/B:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617280192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617280192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,205 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208de0afa642ed54168a570f6b21e28473a_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:20,208 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208de0afa642ed54168a570f6b21e28473a_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:20,209 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208de0afa642ed54168a570f6b21e28473a_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:20,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120831a753c7d4ee4b08ad553b42a5f095cb_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617219554/Put/seqid=0 2024-12-08T00:20:20,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742004_1180 (size=12207) 2024-12-08T00:20:20,245 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/94c955e544874371aecf6c9ed5d905cb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/94c955e544874371aecf6c9ed5d905cb 2024-12-08T00:20:20,252 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 94c955e544874371aecf6c9ed5d905cb(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:20,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:20,252 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617220157; duration=0sec 2024-12-08T00:20:20,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:20,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:20,252 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:20,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:20,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:20,253 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
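[Editor's note] The selection just logged (3 eligible files picked by ExploringCompactionPolicy, "16 blocking") is governed by a handful of standard store-compaction settings. A hedged sketch of setting those knobs programmatically follows; the key names are stock HBase configuration properties, but the values shown are the usual defaults and are illustrative, not taken from this test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of store files considered for one minor compaction.
        // With three eligible files and a minimum of three, a selection like the one
        // logged above ("selected 3 files ... after considering 1 permutations") is possible.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which writes to the store are held back until compaction
        // catches up; matches the "16 blocking" figure in the log.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}
```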
2024-12-08T00:20:20,254 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/db488a4262bc4def84e962940fc3a512, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.3 K 2024-12-08T00:20:20,254 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting db488a4262bc4def84e962940fc3a512, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733617217811 2024-12-08T00:20:20,254 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 84b92731a1df4e2bb5399c3698cd3d3d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733617218490 2024-12-08T00:20:20,255 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dfad7e4b1424f88a9c18038f1a7024a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219203 2024-12-08T00:20:20,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742005_1181 (size=4469) 2024-12-08T00:20:20,264 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#156 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:20,265 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/52299a07be464cb4b183f367682d007f is 175, key is test_row_0/A:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742006_1182 (size=12154) 2024-12-08T00:20:20,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:20,287 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120831a753c7d4ee4b08ad553b42a5f095cb_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120831a753c7d4ee4b08ad553b42a5f095cb_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:20,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/75bd4d745910457fa58200dac6cad25a, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:20,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/75bd4d745910457fa58200dac6cad25a is 175, key is test_row_0/A:col10/1733617219554/Put/seqid=0 2024-12-08T00:20:20,294 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#159 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:20,295 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b4732d9c4c104f5bae33f6fb35b784b6 is 50, key is test_row_0/C:col10/1733617219514/Put/seqid=0 2024-12-08T00:20:20,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617280297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617280297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617280297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617280297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617280298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742007_1183 (size=31161) 2024-12-08T00:20:20,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742009_1185 (size=12207) 2024-12-08T00:20:20,383 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b4732d9c4c104f5bae33f6fb35b784b6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b4732d9c4c104f5bae33f6fb35b784b6 2024-12-08T00:20:20,392 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into b4732d9c4c104f5bae33f6fb35b784b6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
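[Editor's note] The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") are the region server pushing back on writers while the flush above drains the memstore; callers are expected to back off and retry. Below is a minimal, hedged sketch of a writer doing that explicitly. It assumes a plain Table.put call against this test table; depending on client retry settings the exception may also surface wrapped (for example in a RetriesExhaustedException), so the catch is deliberately broad. Retry count and backoff values are illustrative.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;        // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;               // write accepted
                } catch (IOException e) {
                    // RegionTooBusyException ("Over memstore limit=...") is one retryable
                    // cause, as seen in this log; wait for the flush/compaction to finish.
                    if (attempt == 5) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;      // simple exponential backoff
                }
            }
        }
    }
}
```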
2024-12-08T00:20:20,392 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:20,392 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617220163; duration=0sec 2024-12-08T00:20:20,393 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:20,393 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:20,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742008_1184 (size=30955) 2024-12-08T00:20:20,397 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/75bd4d745910457fa58200dac6cad25a 2024-12-08T00:20:20,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/61276c8e7e2044e8ae4e02070f1f71e5 is 50, key is test_row_0/B:col10/1733617219554/Put/seqid=0 2024-12-08T00:20:20,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742010_1186 (size=12001) 2024-12-08T00:20:20,428 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/61276c8e7e2044e8ae4e02070f1f71e5 2024-12-08T00:20:20,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e4f2f12b39b4b98862a28037eee2ecb is 50, key is test_row_0/C:col10/1733617219554/Put/seqid=0 2024-12-08T00:20:20,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742011_1187 (size=12001) 2024-12-08T00:20:20,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617280502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617280503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617280503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617280504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617280504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:20,760 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/52299a07be464cb4b183f367682d007f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f 2024-12-08T00:20:20,769 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 52299a07be464cb4b183f367682d007f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:20,769 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:20,769 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617220153; duration=0sec 2024-12-08T00:20:20,770 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:20,770 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:20,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617280806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617280807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617280807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617280809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617280810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:20,881 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e4f2f12b39b4b98862a28037eee2ecb 2024-12-08T00:20:20,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/75bd4d745910457fa58200dac6cad25a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a 2024-12-08T00:20:20,896 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a, entries=150, sequenceid=118, filesize=30.2 K 2024-12-08T00:20:20,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/61276c8e7e2044e8ae4e02070f1f71e5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5 2024-12-08T00:20:20,905 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5, entries=150, sequenceid=118, filesize=11.7 K 2024-12-08T00:20:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e4f2f12b39b4b98862a28037eee2ecb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb 2024-12-08T00:20:20,915 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb, entries=150, sequenceid=118, filesize=11.7 K 2024-12-08T00:20:20,919 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for fabe935a14e4a2f5a6e3e15c47ba0977 in 747ms, sequenceid=118, compaction requested=false 2024-12-08T00:20:20,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:20,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:20,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-08T00:20:20,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-08T00:20:20,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-08T00:20:20,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3880 sec 2024-12-08T00:20:20,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.3930 sec 2024-12-08T00:20:21,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:21,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T00:20:21,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:21,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:21,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:21,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082e900a9d054342f1a07d64b1d2e6a6d0_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617281334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617281337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617281338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617281337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617281339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742012_1188 (size=14794) 2024-12-08T00:20:21,368 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:21,374 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082e900a9d054342f1a07d64b1d2e6a6d0_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082e900a9d054342f1a07d64b1d2e6a6d0_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:21,375 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/05e48a06a47f4720b0528e6ab3d57069, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:21,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/05e48a06a47f4720b0528e6ab3d57069 is 175, key is test_row_0/A:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742013_1189 (size=39749) 2024-12-08T00:20:21,398 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=31.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/05e48a06a47f4720b0528e6ab3d57069 2024-12-08T00:20:21,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/12f478a81ecf4e58a681297f341b8574 is 50, key is test_row_0/B:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742014_1190 (size=12151) 2024-12-08T00:20:21,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/12f478a81ecf4e58a681297f341b8574 2024-12-08T00:20:21,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/f7f3e9725fc74b6181be145f7a76e435 is 50, key is test_row_0/C:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617281439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742015_1191 (size=12151) 2024-12-08T00:20:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617281445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617281445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617281446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617281447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/f7f3e9725fc74b6181be145f7a76e435 2024-12-08T00:20:21,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/05e48a06a47f4720b0528e6ab3d57069 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069 2024-12-08T00:20:21,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069, entries=200, sequenceid=138, filesize=38.8 K 2024-12-08T00:20:21,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/12f478a81ecf4e58a681297f341b8574 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574 2024-12-08T00:20:21,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574, entries=150, sequenceid=138, filesize=11.9 K 2024-12-08T00:20:21,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/f7f3e9725fc74b6181be145f7a76e435 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435 2024-12-08T00:20:21,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435, entries=150, sequenceid=138, filesize=11.9 K 2024-12-08T00:20:21,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for fabe935a14e4a2f5a6e3e15c47ba0977 in 169ms, sequenceid=138, compaction requested=true 2024-12-08T00:20:21,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:21,485 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:21,487 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:21,487 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:21,487 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,487 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=99.5 K 2024-12-08T00:20:21,487 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,487 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069] 2024-12-08T00:20:21,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:21,488 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52299a07be464cb4b183f367682d007f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219203 2024-12-08T00:20:21,489 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75bd4d745910457fa58200dac6cad25a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733617219552 2024-12-08T00:20:21,489 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05e48a06a47f4720b0528e6ab3d57069, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:21,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:21,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:21,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:21,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:21,493 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
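The ExploringCompactionPolicy / HStore lines above show the region server picking three flushed HFiles per store for a minor compaction once enough files have accumulated (the "3 store files, 0 compacting, 3 eligible, 16 blocking" figures reflect the usual hbase.hstore.compaction.min and hbase.hstore.blockingStoreFiles thresholds). Purely as a point of reference — a minimal sketch, assuming a stock HBase Java client with an hbase-site.xml on the classpath that points at this cluster, and reusing the table name from the log; the class name is made up — a client can also queue a compaction request itself, with the server still applying the same selection policy logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask every region of the table to run a (minor) compaction; which HFiles
                // get merged is still decided server-side, as in the selection lines above.
                admin.compact(TableName.valueOf("TestAcidGuarantees"));
                // admin.majorCompact(TableName.valueOf("TestAcidGuarantees")) would request a major compaction instead.
            }
        }
    }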
2024-12-08T00:20:21,494 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/94c955e544874371aecf6c9ed5d905cb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.5 K 2024-12-08T00:20:21,494 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 94c955e544874371aecf6c9ed5d905cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219203 2024-12-08T00:20:21,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:21,495 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 61276c8e7e2044e8ae4e02070f1f71e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733617219552 2024-12-08T00:20:21,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:21,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:21,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:21,497 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 12f478a81ecf4e58a681297f341b8574, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:21,509 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:21,514 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#166 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:21,515 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a984c31a308f4dbd992baee78e312211 is 50, key is test_row_0/B:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,525 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120851b0b0a665424acdb6d347f4ea60e150_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:21,528 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120851b0b0a665424acdb6d347f4ea60e150_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:21,528 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120851b0b0a665424acdb6d347f4ea60e150_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:21,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742017_1193 (size=4469) 2024-12-08T00:20:21,551 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#165 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:21,552 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f3dababedc2847a49e517817ca5c85f9 is 175, key is test_row_0/A:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742016_1192 (size=12459) 2024-12-08T00:20:21,564 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a984c31a308f4dbd992baee78e312211 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a984c31a308f4dbd992baee78e312211 2024-12-08T00:20:21,570 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into a984c31a308f4dbd992baee78e312211(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
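The recurring RegionTooBusyException stack traces throughout this section come from HRegion.checkResources rejecting mutations while the region's memstore sits above its blocking limit ("Over memstore limit=512.0 K"); they stop once the flushes and compactions logged here catch up. The stock HBase client already treats this as a retryable condition and backs off internally, so the following is only an illustrative sketch of an application-level retry loop, reusing the table, column family A, qualifier col10, and row key seen in the log; the class name, attempt cap, and backoff values are arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);              // can fail while the region is blocked on its memstore
                        break;
                    } catch (RegionTooBusyException busy) {
                        if (attempt >= 5) {
                            throw busy;              // give up after a few attempts (arbitrary cap for the sketch)
                        }
                        Thread.sleep(backoffMs);     // wait for flushes to drain the memstore, then retry
                        backoffMs *= 2;
                    }
                }
            }
        }
    }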
2024-12-08T00:20:21,570 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:21,571 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617221491; duration=0sec 2024-12-08T00:20:21,571 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:21,571 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:21,571 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:21,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:21,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:21,574 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,574 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b4732d9c4c104f5bae33f6fb35b784b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=35.5 K 2024-12-08T00:20:21,575 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b4732d9c4c104f5bae33f6fb35b784b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617219203 2024-12-08T00:20:21,576 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e4f2f12b39b4b98862a28037eee2ecb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733617219552 2024-12-08T00:20:21,577 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f7f3e9725fc74b6181be145f7a76e435, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:21,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is 
added to blk_1073742018_1194 (size=31413) 2024-12-08T00:20:21,595 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#167 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:21,595 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/c088ddf17e7c484b954d055c1f95a81d is 50, key is test_row_0/C:col10/1733617221315/Put/seqid=0 2024-12-08T00:20:21,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742019_1195 (size=12459) 2024-12-08T00:20:21,619 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/c088ddf17e7c484b954d055c1f95a81d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c088ddf17e7c484b954d055c1f95a81d 2024-12-08T00:20:21,628 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into c088ddf17e7c484b954d055c1f95a81d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
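A few records below, the master logs a client-requested flush of TestAcidGuarantees (HMaster "flush TestAcidGuarantees", FlushTableProcedure pid=47); flushing is what ultimately releases the blocked handlers, as the earlier "Finished flush of dataSize ..." records show. A minimal sketch of the client side of such a request follows, under the assumption that the blocking limit seen in this log is the usual product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier (this test evidently runs with a flush size far below the 128 MB default). The property values in the sketch are illustrative only, and in practice they belong in the region servers' hbase-site.xml rather than the client configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Region-server sizing shown only for the arithmetic behind the logged limit:
            // blocking limit = flush size * block multiplier, e.g. 128 KB * 4 = 512 KB.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Corresponds to the 'flush TestAcidGuarantees' request the master records as a FlushTableProcedure.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }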
2024-12-08T00:20:21,628 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:21,628 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617221495; duration=0sec 2024-12-08T00:20:21,628 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:21,629 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-08T00:20:21,638 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-08T00:20:21,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-08T00:20:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-08T00:20:21,642 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:21,642 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:21,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:21,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:21,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T00:20:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,645 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:21,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:21,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c1359021a510405ebe113af6bf47221d_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617221337/Put/seqid=0 2024-12-08T00:20:21,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617281658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617281659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617281660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617281661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617281661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742020_1196 (size=12304) 2024-12-08T00:20:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-08T00:20:21,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617281765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617281765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617281766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617281766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617281767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:21,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:21,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:21,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:21,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-08T00:20:21,948 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:21,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:21,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617281968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617281968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617281968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617281969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:21,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617281970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:21,989 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f3dababedc2847a49e517817ca5c85f9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9 2024-12-08T00:20:21,996 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into f3dababedc2847a49e517817ca5c85f9(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:21,996 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:21,996 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617221485; duration=0sec 2024-12-08T00:20:21,996 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:21,996 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:22,075 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:22,081 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c1359021a510405ebe113af6bf47221d_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c1359021a510405ebe113af6bf47221d_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:22,082 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6d2e6fa668374eb0b000871328b5f833, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:22,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6d2e6fa668374eb0b000871328b5f833 is 175, key is test_row_0/A:col10/1733617221337/Put/seqid=0 2024-12-08T00:20:22,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742021_1197 (size=31105) 2024-12-08T00:20:22,137 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6d2e6fa668374eb0b000871328b5f833 2024-12-08T00:20:22,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/9c38b74869934b22b722837852235b8e is 50, key is test_row_0/B:col10/1733617221337/Put/seqid=0 2024-12-08T00:20:22,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742022_1198 (size=12151) 2024-12-08T00:20:22,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-08T00:20:22,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:22,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:22,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:22,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617282270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617282272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617282273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617282273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617282273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:22,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:22,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:22,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:22,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:22,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:22,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/9c38b74869934b22b722837852235b8e 2024-12-08T00:20:22,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/219cd8825e9e41fcb0ce46bb6ece5b5d is 50, key is test_row_0/C:col10/1733617221337/Put/seqid=0 2024-12-08T00:20:22,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742023_1199 (size=12151) 2024-12-08T00:20:22,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/219cd8825e9e41fcb0ce46bb6ece5b5d 2024-12-08T00:20:22,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6d2e6fa668374eb0b000871328b5f833 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833 2024-12-08T00:20:22,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833, entries=150, sequenceid=161, filesize=30.4 K 2024-12-08T00:20:22,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/9c38b74869934b22b722837852235b8e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e 2024-12-08T00:20:22,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e, entries=150, sequenceid=161, filesize=11.9 K 2024-12-08T00:20:22,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/219cd8825e9e41fcb0ce46bb6ece5b5d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d 2024-12-08T00:20:22,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d, entries=150, sequenceid=161, filesize=11.9 K 2024-12-08T00:20:22,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for fabe935a14e4a2f5a6e3e15c47ba0977 in 977ms, sequenceid=161, compaction requested=false 2024-12-08T00:20:22,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:22,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-08T00:20:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
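(Editor's note, not part of the captured log.) The repeated RegionTooBusyException entries above show the region server rejecting Mutate calls while the region's memstore sits over its blocking limit (512.0 K in this run); once the in-flight flush completes, space is freed and writes can proceed. The stock HBase client already retries this exception internally, so the sketch below is only an illustration of the back-off pattern, with the table, row, family, and qualifier names copied from the log purely as placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family "A" / qualifier "col10" mirror the keys visible in the log above.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);          // may be rejected while the memstore is over its blocking limit
                    break;                   // write accepted
                } catch (RegionTooBusyException e) {
                    if (attempt >= 10) {
                        throw e;             // give up after a bounded number of attempts
                    }
                    Thread.sleep(backoffMs); // wait for the flush to free memstore space
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```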
2024-12-08T00:20:22,717 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:22,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208308d9cca410c405ba4d8699ab6032263_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617221652/Put/seqid=0 2024-12-08T00:20:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-08T00:20:22,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742024_1200 (size=12304) 2024-12-08T00:20:22,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:22,767 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208308d9cca410c405ba4d8699ab6032263_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208308d9cca410c405ba4d8699ab6032263_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:22,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/99bf666bcefb4ed69a4e9e0d03a9dba6, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:22,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/99bf666bcefb4ed69a4e9e0d03a9dba6 is 175, key is test_row_0/A:col10/1733617221652/Put/seqid=0 2024-12-08T00:20:22,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:22,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742025_1201 (size=31105) 2024-12-08T00:20:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617282841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617282845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617282846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617282846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617282846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617282947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617282950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617282951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617282951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:22,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617282951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617283150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617283154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617283155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617283155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617283156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,193 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=178, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/99bf666bcefb4ed69a4e9e0d03a9dba6 2024-12-08T00:20:23,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/dda9a3a1baac43fd998a8e110109776c is 50, key is test_row_0/B:col10/1733617221652/Put/seqid=0 2024-12-08T00:20:23,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742026_1202 (size=12151) 2024-12-08T00:20:23,219 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/dda9a3a1baac43fd998a8e110109776c 2024-12-08T00:20:23,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/82e6e9ba78c540b88777fb9905536b13 is 50, key is test_row_0/C:col10/1733617221652/Put/seqid=0 2024-12-08T00:20:23,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742027_1203 (size=12151) 2024-12-08T00:20:23,240 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/82e6e9ba78c540b88777fb9905536b13 2024-12-08T00:20:23,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/99bf666bcefb4ed69a4e9e0d03a9dba6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6 2024-12-08T00:20:23,254 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6, entries=150, sequenceid=178, filesize=30.4 K 2024-12-08T00:20:23,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/dda9a3a1baac43fd998a8e110109776c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c 2024-12-08T00:20:23,267 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c, entries=150, sequenceid=178, filesize=11.9 K 2024-12-08T00:20:23,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/82e6e9ba78c540b88777fb9905536b13 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13 2024-12-08T00:20:23,275 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13, entries=150, sequenceid=178, filesize=11.9 K 2024-12-08T00:20:23,278 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for fabe935a14e4a2f5a6e3e15c47ba0977 in 560ms, sequenceid=178, compaction requested=true 2024-12-08T00:20:23,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:23,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
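(Editor's note, not part of the captured log.) The sequence around this point, FlushRegionCallable starting, store files written under .tmp, committed into the A/B/C store directories, then "Finished flush ...", is the flush that eventually clears the "Over memstore limit" condition. The 512.0 K blocking limit is consistent with a small region memstore flush size multiplied by the block multiplier (for example, 128 KB x the default multiplier of 4); the exact values used by this test are not visible in the excerpt, so the numbers below are assumptions. The sketch shows the two relevant configuration keys and a client-side Admin.flush call of the kind the flush procedure above is serving:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushLimitExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // Region-server settings (normally placed in the servers' hbase-site.xml), shown
        // here only to document how a 512 KB blocking limit can arise:
        // blocking limit = flush size * block multiplier (128 KB * 4 = 512 KB).
        // Setting them on a client Configuration has no effect on a running cluster.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a flush of every region of the table so the memstore drops back
            // under its blocking limit and blocked writers can make progress again.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```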
2024-12-08T00:20:23,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-08T00:20:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-08T00:20:23,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-08T00:20:23,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6390 sec 2024-12-08T00:20:23,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.6440 sec 2024-12-08T00:20:23,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:23,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:23,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087d3aa407c937493e981ef621abd187c3_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617223453/Put/seqid=0 2024-12-08T00:20:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742028_1204 (size=12304) 2024-12-08T00:20:23,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617283524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617283524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617283524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617283525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617283526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617283631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617283631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617283631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617283632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:23,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617283634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:23,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-12-08T00:20:23,747 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed
2024-12-08T00:20:23,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T00:20:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees
2024-12-08T00:20:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T00:20:23,750 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:20:23,750 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:20:23,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T00:20:23,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617283835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617283837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617283837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617283844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617283844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T00:20:23,901 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:23,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T00:20:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:23,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:20:23,908 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:23,913 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087d3aa407c937493e981ef621abd187c3_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087d3aa407c937493e981ef621abd187c3_fabe935a14e4a2f5a6e3e15c47ba0977
2024-12-08T00:20:23,914 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/e0d73d7bf4d546e8b73c56e203074339, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977]
2024-12-08T00:20:23,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/e0d73d7bf4d546e8b73c56e203074339 is 175, key is test_row_0/A:col10/1733617223453/Put/seqid=0
2024-12-08T00:20:23,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742029_1205 (size=31105)
2024-12-08T00:20:23,926 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=202, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/e0d73d7bf4d546e8b73c56e203074339
2024-12-08T00:20:23,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/6943eeba406c43b197f2fb7a6ababa83 is 50, key is test_row_0/B:col10/1733617223453/Put/seqid=0
2024-12-08T00:20:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742030_1206 (size=12151)
2024-12-08T00:20:24,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T00:20:24,054 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:20:24,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-12-08T00:20:24,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation
on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:24,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:24,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617284137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617284140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617284140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617284146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617284148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,209 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T00:20:24,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:24,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:20:24,333 INFO [master/017dd09fb407:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-08T00:20:24,333 INFO [master/017dd09fb407:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-08T00:20:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T00:20:24,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/6943eeba406c43b197f2fb7a6ababa83
2024-12-08T00:20:24,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:20:24,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-12-08T00:20:24,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
2024-12-08T00:20:24,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
as already flushing 2024-12-08T00:20:24,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/76fdac4988a6483eb1bee14e53749a5f is 50, key is test_row_0/C:col10/1733617223453/Put/seqid=0 2024-12-08T00:20:24,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742031_1207 (size=12151) 2024-12-08T00:20:24,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T00:20:24,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
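Note: the repeated pid=50 failures above ("Unable to complete flush ... as already flushing", then "Remote procedure failed, pid=50") are the master re-dispatching its per-region flush callable while MemStoreFlusher.0 is still writing out the same region; the procedure keeps retrying until the region reports success later in this log. Purely as a hedged illustration (not taken from this test run), the minimal Java sketch below shows the kind of client-side admin flush that starts such a flush procedure chain; the table name is the one from the log, everything else is a plain-vanilla HBase client setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush all regions of the table; the master drives this
          // through a flush procedure that dispatches per-region flush callables to the
          // region servers (the pid=49 / pid=50 pair visible in this log).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }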
2024-12-08T00:20:24,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617284640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617284644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617284645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617284652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617284652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T00:20:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:24,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/76fdac4988a6483eb1bee14e53749a5f 2024-12-08T00:20:24,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/e0d73d7bf4d546e8b73c56e203074339 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339 2024-12-08T00:20:24,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339, entries=150, sequenceid=202, filesize=30.4 K 2024-12-08T00:20:24,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/6943eeba406c43b197f2fb7a6ababa83 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83 2024-12-08T00:20:24,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83, entries=150, 
sequenceid=202, filesize=11.9 K 2024-12-08T00:20:24,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/76fdac4988a6483eb1bee14e53749a5f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f 2024-12-08T00:20:24,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f, entries=150, sequenceid=202, filesize=11.9 K 2024-12-08T00:20:24,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for fabe935a14e4a2f5a6e3e15c47ba0977 in 1335ms, sequenceid=202, compaction requested=true 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:24,818 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:24,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:20:24,818 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:24,820 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124728 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:24,820 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:24,820 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in 
TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,821 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=121.8 K 2024-12-08T00:20:24,821 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,821 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339] 2024-12-08T00:20:24,821 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:24,821 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:24,821 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
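Note: the RegionTooBusyException warnings a few entries above ("Over memstore limit=512.0 K") are write back-pressure: HRegion.checkResources (visible in those stack traces) rejects new mutations once the region's memstore exceeds the flush size times the blocking multiplier while flushes are still in flight, and the client retries the puts. The 512 K figure suggests the test configures a very small flush size. The sketch below is illustrative only; it uses the stock HBase property names with made-up values to show how the two settings relate.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is triggered (illustrative value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Updates are rejected with RegionTooBusyException once the memstore grows past
        // flush.size * block.multiplier while the flush has not finished yet.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block above roughly " + blockingLimit + " bytes per region");
      }
    }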
2024-12-08T00:20:24,821 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a984c31a308f4dbd992baee78e312211, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=47.8 K 2024-12-08T00:20:24,822 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a984c31a308f4dbd992baee78e312211, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:24,822 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3dababedc2847a49e517817ca5c85f9, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:24,822 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c38b74869934b22b722837852235b8e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733617221335 2024-12-08T00:20:24,822 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d2e6fa668374eb0b000871328b5f833, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733617221335 2024-12-08T00:20:24,823 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dda9a3a1baac43fd998a8e110109776c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733617221652 2024-12-08T00:20:24,823 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99bf666bcefb4ed69a4e9e0d03a9dba6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733617221652 2024-12-08T00:20:24,823 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6943eeba406c43b197f2fb7a6ababa83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:24,823 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0d73d7bf4d546e8b73c56e203074339, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:24,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:24,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 
2024-12-08T00:20:24,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:24,828 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:24,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:24,841 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:24,843 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#177 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:24,843 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/e6b927bd5cfa44dd84022122cd292dd4 is 50, key is test_row_0/B:col10/1733617223453/Put/seqid=0 2024-12-08T00:20:24,848 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120834d37b5262f14fdd942bd1181198ba29_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:24,851 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120834d37b5262f14fdd942bd1181198ba29_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:24,851 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120834d37b5262f14fdd942bd1181198ba29_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:24,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T00:20:24,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120803f5af8b9c1941b78bf98a11273b1afb_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617223524/Put/seqid=0 2024-12-08T00:20:24,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742033_1209 (size=4469) 2024-12-08T00:20:24,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742034_1210 (size=12304) 2024-12-08T00:20:24,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742032_1208 (size=12595) 2024-12-08T00:20:24,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:24,894 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120803f5af8b9c1941b78bf98a11273b1afb_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803f5af8b9c1941b78bf98a11273b1afb_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:24,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f6e1780ff5ef4bca99f38f179b678194, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:24,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f6e1780ff5ef4bca99f38f179b678194 is 175, key is test_row_0/A:col10/1733617223524/Put/seqid=0 2024-12-08T00:20:24,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742035_1211 (size=31105) 2024-12-08T00:20:24,916 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f6e1780ff5ef4bca99f38f179b678194 2024-12-08T00:20:24,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/172dee4b338646a2a7a78080b7505113 is 50, key is test_row_0/B:col10/1733617223524/Put/seqid=0 2024-12-08T00:20:24,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742036_1212 (size=12151) 2024-12-08T00:20:24,951 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/172dee4b338646a2a7a78080b7505113 2024-12-08T00:20:24,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5c852641ea9d418e8adae1bcee6a10d2 is 50, key is test_row_0/C:col10/1733617223524/Put/seqid=0 2024-12-08T00:20:24,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742037_1213 (size=12151) 2024-12-08T00:20:25,274 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#178 average throughput is 
0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:25,275 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/1b16a35cc0134a62b25053b7d595c871 is 175, key is test_row_0/A:col10/1733617223453/Put/seqid=0 2024-12-08T00:20:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742038_1214 (size=31549) 2024-12-08T00:20:25,293 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/e6b927bd5cfa44dd84022122cd292dd4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e6b927bd5cfa44dd84022122cd292dd4 2024-12-08T00:20:25,301 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into e6b927bd5cfa44dd84022122cd292dd4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:25,301 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:25,301 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=12, startTime=1733617224818; duration=0sec 2024-12-08T00:20:25,302 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:25,302 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:25,302 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:25,305 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:25,305 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:25,305 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:25,305 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c088ddf17e7c484b954d055c1f95a81d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=47.8 K 2024-12-08T00:20:25,307 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c088ddf17e7c484b954d055c1f95a81d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733617220188 2024-12-08T00:20:25,308 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 219cd8825e9e41fcb0ce46bb6ece5b5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733617221335 2024-12-08T00:20:25,308 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 82e6e9ba78c540b88777fb9905536b13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733617221652 2024-12-08T00:20:25,309 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 76fdac4988a6483eb1bee14e53749a5f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:25,340 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#182 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:25,341 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/47e48423f633482c909890122e9c90fc is 50, key is test_row_0/C:col10/1733617223453/Put/seqid=0 2024-12-08T00:20:25,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742039_1215 (size=12595) 2024-12-08T00:20:25,364 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/47e48423f633482c909890122e9c90fc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/47e48423f633482c909890122e9c90fc 2024-12-08T00:20:25,372 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5c852641ea9d418e8adae1bcee6a10d2 2024-12-08T00:20:25,373 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into 47e48423f633482c909890122e9c90fc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
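Note: the compaction activity around this point (SortedCompactionPolicy / ExploringCompactionPolicy selecting "4 files ... 4 eligible, 16 blocking", then "Completed compaction of 4 (all) file(s)") is the routine minor compaction that follows flushes once a store accumulates enough HFiles. As a hedged illustration only, the snippet below prints the stock configuration keys that govern that selection; the fallback values shown are the usual HBase defaults to the best of my knowledge, not values read from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum and maximum number of store files considered for one minor compaction.
        System.out.println("min files: " + conf.get("hbase.hstore.compaction.min", "3"));
        System.out.println("max files: " + conf.get("hbase.hstore.compaction.max", "10"));
        // Size ratio used by ExploringCompactionPolicy when picking candidate sets.
        System.out.println("ratio:     " + conf.get("hbase.hstore.compaction.ratio", "1.2"));
        // Flushes are delayed once a store holds this many files (the "16 blocking" above).
        System.out.println("blocking:  " + conf.get("hbase.hstore.blockingStoreFiles", "16"));
      }
    }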
2024-12-08T00:20:25,373 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977:
2024-12-08T00:20:25,373 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=12, startTime=1733617224818; duration=0sec
2024-12-08T00:20:25,374 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:20:25,374 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C
2024-12-08T00:20:25,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f6e1780ff5ef4bca99f38f179b678194 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194
2024-12-08T00:20:25,384 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194, entries=150, sequenceid=215, filesize=30.4 K
2024-12-08T00:20:25,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/172dee4b338646a2a7a78080b7505113 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113
2024-12-08T00:20:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,390 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113, entries=150, sequenceid=215, filesize=11.9 K
2024-12-08T00:20:25,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5c852641ea9d418e8adae1bcee6a10d2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2
2024-12-08T00:20:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,397 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2, entries=150, sequenceid=215, filesize=11.9 K
2024-12-08T00:20:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,399 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for fabe935a14e4a2f5a6e3e15c47ba0977 in 571ms, sequenceid=215, compaction requested=false
2024-12-08T00:20:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977:
2024-12-08T00:20:25,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
2024-12-08T00:20:25,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-12-08T00:20:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-12-08T00:20:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-12-08T00:20:25,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6530 sec
2024-12-08T00:20:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.6570 sec
2024-12-08T00:20:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:25,411 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,504 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,508 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,512 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,517 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,522 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,526 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,529 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,533 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,538 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,544 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,548 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,554 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=36703) between 2024-12-08T00:20:25,555 and 2024-12-08T00:20:25,641 ...]
2024-12-08T00:20:25,641 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,645 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,649 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,655 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,660 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,668 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,673 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,676 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,679 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,682 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,686 DEBUG 
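The elided stretch above is a single DEBUG message repeated by the RpcServer handler threads from the org.apache.hadoop.hbase.regionserver.storefiletracker logger. When reproducing this run, that one logger can be quieted without touching the rest of the logging setup; the sketch below is an illustration only (not part of the original test harness) and assumes Log4j 2, the backend in use here, is on the classpath.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class QuietStoreFileTrackerLogs {
    public static void main(String[] args) {
        // Raise the noisy package from DEBUG to INFO so the repeated
        // "instantiating StoreFileTracker impl ..." entries are no longer emitted.
        Configurator.setLevel(
            "org.apache.hadoop.hbase.regionserver.storefiletracker", Level.INFO);
    }
}
```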
2024-12-08T00:20:25,689 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/1b16a35cc0134a62b25053b7d595c871 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871
[... interleaved duplicate storefiletracker.StoreFileTrackerFactory DEBUG entries (through 00:20:25,695) elided ...]
2024-12-08T00:20:25,695 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 1b16a35cc0134a62b25053b7d595c871(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute.
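For context, the compaction recorded above rewrote four store files of column family A into a single 30.8 K file. The fragment below is a hedged sketch, not taken from this test, of how such a flush-and-compact cycle can be requested from a client through the public Admin API; the table name TestAcidGuarantees matches the region in the log, everything else is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(table);        // flush memstores to new store files
            admin.majorCompact(table); // then request a major compaction (asynchronous)
        }
    }
}
```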
2024-12-08T00:20:25,695 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977:
2024-12-08T00:20:25,695 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=12, startTime=1733617224818; duration=0sec
2024-12-08T00:20:25,695 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:20:25,695 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A
[... interleaved duplicate storefiletracker.StoreFileTrackerFactory DEBUG entries (00:20:25,695 through 00:20:25,710) elided ...]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:25,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c756be75a4404f5a8a41bb012af5ba3a_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:25,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617285756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617285758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617285759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617285760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617285760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742041_1217 (size=24758) 2024-12-08T00:20:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T00:20:25,854 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-08T00:20:25,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-08T00:20:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:25,857 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:25,857 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:25,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:25,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617285862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617285863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617285866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617285866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617285866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:25,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:26,009 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
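[Editor's note] The long runs of "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" DEBUG entries earlier in this log come from StoreFileTrackerFactory resolving the tracker for each store access. A minimal sketch of how a tracker implementation is normally selected at table-creation time follows; the property name "hbase.store.file-tracker.impl", the value "DEFAULT", and the table name "MyTrackedTable" are assumptions for illustration and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Assumed property: selects which StoreFileTracker the factory builds
      // (the DEBUG entries above show the default implementation being chosen).
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MyTrackedTable"))          // hypothetical table
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
          .setValue("hbase.store.file-tracker.impl", "DEFAULT")     // assumed key/value
          .build();
      admin.createTable(td);
    }
  }
}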
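[Editor's note] The repeated RegionTooBusyException entries above show HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K here, i.e. the flush size times the block multiplier) and flushes are still in flight; the client treats this as retryable. A minimal client-side sketch is below: the table, row, and column names are taken from the log, while the retry settings and cell value are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs (illustrative values): RegionTooBusyException is
    // retried with back-off until these are exhausted.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // base pause between retries, in ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the memstore is over its blocking limit, the server answers with
      // RegionTooBusyException (as in the WARN entries above); the call only
      // fails to the application once the client's retries are used up.
      table.put(put);
    }
  }
}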
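[Editor's note] The master-side entries around this point (FLUSH procId=49 completed, pid=51 FlushTableProcedure stored, pid=52 FlushRegionProcedure dispatched and failing with "Unable to complete flush" because the region is already flushing) originate from a client-requested table flush. A minimal sketch of issuing that request, assuming a reachable cluster with default client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Drives the FlushTableProcedure/FlushRegionProcedure pair seen above
      // (pid=51 -> pid=52); when the region is already mid-flush, the remote
      // callable reports "Unable to complete flush" back to the master,
      // exactly as the pid=52 entries in this log show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}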
2024-12-08T00:20:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617286066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617286067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617286068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617286069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617286070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:26,168 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:26,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,189 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:26,193 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c756be75a4404f5a8a41bb012af5ba3a_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c756be75a4404f5a8a41bb012af5ba3a_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:26,194 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f0c976a676894ae0acbaad490e6af01b, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f0c976a676894ae0acbaad490e6af01b is 175, key is test_row_0/A:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742040_1216 (size=74395) 2024-12-08T00:20:26,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617286369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617286371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617286372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617286372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617286373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:26,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,599 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=229, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f0c976a676894ae0acbaad490e6af01b 2024-12-08T00:20:26,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a2bef277e38d465297fc842195e70371 is 50, key is test_row_0/B:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742042_1218 (size=12151) 2024-12-08T00:20:26,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a2bef277e38d465297fc842195e70371 2024-12-08T00:20:26,627 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:26,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:26,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:26,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:26,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/a6c599a239a54212936d35ca192f3439 is 50, key is test_row_0/C:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742043_1219 (size=12151) 2024-12-08T00:20:26,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/a6c599a239a54212936d35ca192f3439 2024-12-08T00:20:26,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f0c976a676894ae0acbaad490e6af01b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b 2024-12-08T00:20:26,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b, entries=400, sequenceid=229, filesize=72.7 K 2024-12-08T00:20:26,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a2bef277e38d465297fc842195e70371 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371 2024-12-08T00:20:26,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371, entries=150, sequenceid=229, filesize=11.9 K 2024-12-08T00:20:26,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/a6c599a239a54212936d35ca192f3439 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439 2024-12-08T00:20:26,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439, entries=150, sequenceid=229, filesize=11.9 K 2024-12-08T00:20:26,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for fabe935a14e4a2f5a6e3e15c47ba0977 in 991ms, sequenceid=229, 
compaction requested=true 2024-12-08T00:20:26,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:26,702 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:26,702 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:26,703 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:26,703 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:26,703 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:26,703 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e6b927bd5cfa44dd84022122cd292dd4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=36.0 K 2024-12-08T00:20:26,704 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:26,704 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:26,704 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e6b927bd5cfa44dd84022122cd292dd4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:26,704 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,704 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=133.8 K 2024-12-08T00:20:26,704 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,704 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b] 2024-12-08T00:20:26,705 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 172dee4b338646a2a7a78080b7505113, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733617223491 2024-12-08T00:20:26,705 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b16a35cc0134a62b25053b7d595c871, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:26,705 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a2bef277e38d465297fc842195e70371, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225701 2024-12-08T00:20:26,705 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6e1780ff5ef4bca99f38f179b678194, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733617223491 2024-12-08T00:20:26,706 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0c976a676894ae0acbaad490e6af01b, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225654 2024-12-08T00:20:26,715 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:26,716 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/f0cd7e9f2700429e9881c8888b4a42df is 50, key is test_row_0/B:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,724 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,741 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120836be94b14bab4fc1ad452f8a39c4d79f_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,744 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120836be94b14bab4fc1ad452f8a39c4d79f_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,745 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120836be94b14bab4fc1ad452f8a39c4d79f_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742044_1220 (size=12697) 2024-12-08T00:20:26,765 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/f0cd7e9f2700429e9881c8888b4a42df as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f0cd7e9f2700429e9881c8888b4a42df 2024-12-08T00:20:26,773 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into f0cd7e9f2700429e9881c8888b4a42df(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:26,773 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:26,773 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617226702; duration=0sec 2024-12-08T00:20:26,773 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:26,773 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:26,773 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:26,774 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:26,775 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:26,775 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,775 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/47e48423f633482c909890122e9c90fc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=36.0 K 2024-12-08T00:20:26,776 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 47e48423f633482c909890122e9c90fc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733617222844 2024-12-08T00:20:26,776 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c852641ea9d418e8adae1bcee6a10d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733617223491 2024-12-08T00:20:26,777 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a6c599a239a54212936d35ca192f3439, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225701 2024-12-08T00:20:26,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T00:20:26,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:26,782 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:20:26,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:26,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:26,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:26,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742045_1221 (size=4469) 2024-12-08T00:20:26,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#187 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:26,805 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/86e06f8fee344023b196c9631c68c9ad is 175, key is test_row_0/A:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,824 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#188 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:26,825 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4eba1c0f04424bad927e89195a2b7365 is 50, key is test_row_0/C:col10/1733617225705/Put/seqid=0 2024-12-08T00:20:26,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a5f71944a47b4242bc82973bb5109783_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617225758/Put/seqid=0 2024-12-08T00:20:26,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742046_1222 (size=31651) 2024-12-08T00:20:26,852 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/86e06f8fee344023b196c9631c68c9ad as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad 2024-12-08T00:20:26,858 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 86e06f8fee344023b196c9631c68c9ad(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:26,859 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:26,859 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617226702; duration=0sec 2024-12-08T00:20:26,859 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:26,859 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:26,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742047_1223 (size=12697) 2024-12-08T00:20:26,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742048_1224 (size=12304) 2024-12-08T00:20:26,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:26,873 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4eba1c0f04424bad927e89195a2b7365 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4eba1c0f04424bad927e89195a2b7365 2024-12-08T00:20:26,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:26,880 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a5f71944a47b4242bc82973bb5109783_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a5f71944a47b4242bc82973bb5109783_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:26,881 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into 4eba1c0f04424bad927e89195a2b7365(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
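The entries above show the ExploringCompactionPolicy selecting three store files for each of the A, B, and C column families of region fabe935a14e4a2f5a6e3e15c47ba0977 and rewriting them into a single file per store. Compactions like these are scheduled by the region server itself, but an equivalent request can be made through the public Admin API; the following is a minimal sketch of such a request (the standalone class, its name, and the classpath-provided configuration are assumptions for illustration, not code from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactTestAcidGuarantees {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; the quorum/port are
        // whatever the test cluster advertises (assumed, not copied from the log).
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the region servers to compact every store of the table. Which
          // files get merged is still decided by the configured compaction
          // policy, e.g. the ExploringCompactionPolicy seen in the log above.
          admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
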
2024-12-08T00:20:26,881 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:26,882 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617226702; duration=0sec 2024-12-08T00:20:26,882 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:26,882 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:26,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/9326fa16abb642748ad49000dd7348ea, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:26,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/9326fa16abb642748ad49000dd7348ea is 175, key is test_row_0/A:col10/1733617225758/Put/seqid=0 2024-12-08T00:20:26,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617286881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617286887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617286887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617286889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617286888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742049_1225 (size=31105) 2024-12-08T00:20:26,922 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/9326fa16abb642748ad49000dd7348ea 2024-12-08T00:20:26,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/03d2f21f914444f1820f41c9b99f0d07 is 50, key is test_row_0/B:col10/1733617225758/Put/seqid=0 2024-12-08T00:20:26,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:26,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742050_1226 (size=12151) 2024-12-08T00:20:26,988 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/03d2f21f914444f1820f41c9b99f0d07 2024-12-08T00:20:26,992 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617286989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617286992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617286993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617286994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:26,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617286995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2b19fdc90b8246578fb9a406e39eaefc is 50, key is test_row_0/C:col10/1733617225758/Put/seqid=0 2024-12-08T00:20:27,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742051_1227 (size=12151) 2024-12-08T00:20:27,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617287195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617287196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617287197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617287199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617287203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,422 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2b19fdc90b8246578fb9a406e39eaefc 2024-12-08T00:20:27,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/9326fa16abb642748ad49000dd7348ea as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea 2024-12-08T00:20:27,436 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea, entries=150, sequenceid=256, filesize=30.4 K 2024-12-08T00:20:27,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/03d2f21f914444f1820f41c9b99f0d07 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07 2024-12-08T00:20:27,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,443 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07, entries=150, sequenceid=256, filesize=11.9 K 2024-12-08T00:20:27,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2b19fdc90b8246578fb9a406e39eaefc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc 2024-12-08T00:20:27,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,451 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc, entries=150, sequenceid=256, filesize=11.9 K 2024-12-08T00:20:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
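The repeated RegionTooBusyException warnings above are the region server rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit and the flush is still draining it. The HBase client normally retries such calls on its own, but the failure mode can also be handled explicitly; the sketch below is only an illustration under assumed row, column, and value names, not code from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPut {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              break;                         // write accepted
            } catch (RegionTooBusyException e) {
              if (++attempts > 5) {
                throw e;                     // give up after a few tries
              }
              Thread.sleep(200L * attempts); // back off while the flush drains the memstore
            }
          }
        }
      }
    }
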
2024-12-08T00:20:27,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,452 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for fabe935a14e4a2f5a6e3e15c47ba0977 in 670ms, sequenceid=256, compaction requested=false 2024-12-08T00:20:27,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
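The flush that finishes above (pid=52, ~154.31 KB across the A, B, and C stores at sequenceid=256) was executed by a FlushRegionCallable dispatched from the master's FlushTableProcedure (pid=51). An equivalent flush can be requested from a client through the Admin API; a minimal sketch, assuming the same table name and a classpath-provided configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestAcidGuarantees {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Flushes every region of the table; the master coordinates
          // per-region flush procedures like the pid=51/pid=52 pair in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
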
2024-12-08T00:20:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-08T00:20:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-08T00:20:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-08T00:20:27,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5980 sec 2024-12-08T00:20:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.6020 sec 2024-12-08T00:20:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977
2024-12-08T00:20:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
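The entries above record the flush of table TestAcidGuarantees finishing as a procedure (pid=52 under pid=51) and a memstore flush starting on region fabe935a14e4a2f5a6e3e15c47ba0977; further down, client Mutate RPCs against the same region are rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the memstore is over its limit. The following is a minimal client-side sketch of that interaction, assuming the standard HBase 2.x client API. The table name, column family A, row key test_row_0, and qualifier col10 are taken from the log; the class name, the cell value, and the retry/backoff policy are illustrative assumptions only, and the stock HBase client normally retries this kind of failure on its own.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
            // Requests a table flush, which corresponds to the
            // FlushTableProcedure/FlushRegionProcedure entries seen above.
            admin.flush(tn);

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put); // may be rejected while the memstore is over its limit
                    break;
                } catch (IOException e) {
                    // The rejection logged further down (RegionTooBusyException:
                    // "Over memstore limit=512.0 K") reaches the caller as an IOException,
                    // either directly or wrapped once the client's own retries run out.
                    if (attempt >= 5) throw e;     // give up after a few tries
                    Thread.sleep(100L * attempt);  // simple linear backoff
                }
            }
        }
    }
}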
2024-12-08T00:20:27,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A
2024-12-08T00:20:27,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:27,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B
2024-12-08T00:20:27,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:27,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C
2024-12-08T00:20:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087051836a414b4eb382f95f27559dc0ca_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617227501/Put/seqid=0 2024-12-08T00:20:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,522 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,530 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,537 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742052_1228 (size=12454) 2024-12-08T00:20:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:27,544 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:27,549 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087051836a414b4eb382f95f27559dc0ca_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087051836a414b4eb382f95f27559dc0ca_fabe935a14e4a2f5a6e3e15c47ba0977
2024-12-08T00:20:27,550 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/7f9247fe2f7c4aa0b792ef3b3a65bf75, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977]
2024-12-08T00:20:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617287547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/7f9247fe2f7c4aa0b792ef3b3a65bf75 is 175, key is test_row_0/A:col10/1733617227501/Put/seqid=0
2024-12-08T00:20:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617287547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617287549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617287549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617287550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742053_1229 (size=31251)
2024-12-08T00:20:27,567 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=270, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/7f9247fe2f7c4aa0b792ef3b3a65bf75
2024-12-08T00:20:27,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a36ce5d4ef3b4778af24c02a6d60c0cc is 50, key is test_row_0/B:col10/1733617227501/Put/seqid=0
2024-12-08T00:20:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742054_1230 (size=9857)
2024-12-08T00:20:27,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617287652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617287653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617287654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617287654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:27,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617287654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:27,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617287855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617287855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617287856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617287856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:27,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617287857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T00:20:27,960 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-08T00:20:27,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-08T00:20:27,963 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:27,964 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:27,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:28,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 
(bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a36ce5d4ef3b4778af24c02a6d60c0cc 2024-12-08T00:20:28,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/816ea7d000104ac885a8f10c796bf2f4 is 50, key is test_row_0/C:col10/1733617227501/Put/seqid=0 2024-12-08T00:20:28,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742055_1231 (size=9857) 2024-12-08T00:20:28,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:28,119 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T00:20:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617288159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617288159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617288159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617288160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617288161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:28,272 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T00:20:28,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:28,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:28,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T00:20:28,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:28,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:28,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:28,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/816ea7d000104ac885a8f10c796bf2f4 2024-12-08T00:20:28,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/7f9247fe2f7c4aa0b792ef3b3a65bf75 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75 2024-12-08T00:20:28,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75, entries=150, sequenceid=270, filesize=30.5 K 2024-12-08T00:20:28,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/a36ce5d4ef3b4778af24c02a6d60c0cc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc 2024-12-08T00:20:28,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc, entries=100, sequenceid=270, filesize=9.6 K 2024-12-08T00:20:28,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/816ea7d000104ac885a8f10c796bf2f4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4 2024-12-08T00:20:28,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4, entries=100, sequenceid=270, filesize=9.6 K 2024-12-08T00:20:28,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for fabe935a14e4a2f5a6e3e15c47ba0977 in 959ms, sequenceid=270, compaction requested=true 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:28,462 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:28,462 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:28,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:28,463 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94007 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:28,463 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:28,463 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,463 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=91.8 K 2024-12-08T00:20:28,463 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,463 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75] 2024-12-08T00:20:28,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:28,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:28,464 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:28,464 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f0cd7e9f2700429e9881c8888b4a42df, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=33.9 K 2024-12-08T00:20:28,464 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86e06f8fee344023b196c9631c68c9ad, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225701 2024-12-08T00:20:28,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f0cd7e9f2700429e9881c8888b4a42df, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225701 2024-12-08T00:20:28,465 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9326fa16abb642748ad49000dd7348ea, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733617225758 2024-12-08T00:20:28,465 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 03d2f21f914444f1820f41c9b99f0d07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733617225758 2024-12-08T00:20:28,465 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f9247fe2f7c4aa0b792ef3b3a65bf75, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617226886 2024-12-08T00:20:28,465 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 
a36ce5d4ef3b4778af24c02a6d60c0cc, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617226886 2024-12-08T00:20:28,482 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#195 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:28,482 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/8571267574bd49c9871e43ea5dee83ab is 50, key is test_row_0/B:col10/1733617227501/Put/seqid=0 2024-12-08T00:20:28,484 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:28,500 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208d5433e8e157c406aa90b4fc9f0f414c5_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:28,503 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208d5433e8e157c406aa90b4fc9f0f414c5_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:28,503 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d5433e8e157c406aa90b4fc9f0f414c5_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:28,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742056_1232 (size=12899) 2024-12-08T00:20:28,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742057_1233 (size=4469) 2024-12-08T00:20:28,541 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#196 average throughput is 0.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:28,542 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/4d82ad209cde408bafe8a9d9391c1f36 is 175, key is test_row_0/A:col10/1733617227501/Put/seqid=0 2024-12-08T00:20:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742058_1234 (size=31960) 2024-12-08T00:20:28,560 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/4d82ad209cde408bafe8a9d9391c1f36 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36 2024-12-08T00:20:28,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:28,567 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 4d82ad209cde408bafe8a9d9391c1f36(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:28,567 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:28,567 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617228462; duration=0sec 2024-12-08T00:20:28,568 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:28,568 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:28,568 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:28,570 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:28,570 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:28,570 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:28,570 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4eba1c0f04424bad927e89195a2b7365, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=33.9 K 2024-12-08T00:20:28,571 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4eba1c0f04424bad927e89195a2b7365, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733617225701 2024-12-08T00:20:28,571 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b19fdc90b8246578fb9a406e39eaefc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733617225758 2024-12-08T00:20:28,571 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 816ea7d000104ac885a8f10c796bf2f4, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617226886 2024-12-08T00:20:28,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T00:20:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:28,579 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:20:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:28,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:28,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:28,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:28,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:28,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:28,584 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#197 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:28,585 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b7f258f78ddd42c491582fcc69c79183 is 50, key is test_row_0/C:col10/1733617227501/Put/seqid=0 2024-12-08T00:20:28,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120836a0c280a92044aab05efb7316c191b3_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:28,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742059_1235 (size=12899) 2024-12-08T00:20:28,612 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/b7f258f78ddd42c491582fcc69c79183 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b7f258f78ddd42c491582fcc69c79183 2024-12-08T00:20:28,619 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into b7f258f78ddd42c491582fcc69c79183(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:28,619 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:28,619 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617228462; duration=0sec 2024-12-08T00:20:28,619 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:28,619 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:28,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742060_1236 (size=12454) 2024-12-08T00:20:28,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:28,655 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120836a0c280a92044aab05efb7316c191b3_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836a0c280a92044aab05efb7316c191b3_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:28,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f95c3a60a958461bb51c782273339764, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:28,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f95c3a60a958461bb51c782273339764 is 175, key is test_row_0/A:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:28,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:28,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:28,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617288672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617288672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617288673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617288674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617288675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742061_1237 (size=31255) 2024-12-08T00:20:28,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617288777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617288777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617288777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617288784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617288788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,929 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/8571267574bd49c9871e43ea5dee83ab as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8571267574bd49c9871e43ea5dee83ab 2024-12-08T00:20:28,936 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 8571267574bd49c9871e43ea5dee83ab(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:28,936 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:28,936 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617228462; duration=0sec 2024-12-08T00:20:28,936 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:28,936 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:28,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617288979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617288982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617288986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617288986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:28,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:28,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617288990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:29,093 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f95c3a60a958461bb51c782273339764 2024-12-08T00:20:29,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/50154135ce3b49418889ca8ec66fe813 is 50, key is test_row_0/B:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:29,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742062_1238 (size=12301) 2024-12-08T00:20:29,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617289283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617289285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617289288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617289290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:29,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617289294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:29,515 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/50154135ce3b49418889ca8ec66fe813
2024-12-08T00:20:29,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9a84cebd03ae4725b8547aaccec403d5 is 50, key is test_row_0/C:col10/1733617227548/Put/seqid=0
2024-12-08T00:20:29,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742063_1239 (size=12301)
2024-12-08T00:20:29,541 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9a84cebd03ae4725b8547aaccec403d5
2024-12-08T00:20:29,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f95c3a60a958461bb51c782273339764 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764
2024-12-08T00:20:29,551 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764, entries=150, sequenceid=296, filesize=30.5 K
2024-12-08T00:20:29,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/50154135ce3b49418889ca8ec66fe813 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813
2024-12-08T00:20:29,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:20:29,557 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813, entries=150, sequenceid=296, filesize=12.0 K
2024-12-08T00:20:29,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/9a84cebd03ae4725b8547aaccec403d5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5
2024-12-08T00:20:29,564 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5, entries=150, sequenceid=296, filesize=12.0 K
2024-12-08T00:20:29,565 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for fabe935a14e4a2f5a6e3e15c47ba0977 in 986ms, sequenceid=296, compaction requested=false
2024-12-08T00:20:29,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977:
2024-12-08T00:20:29,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
2024-12-08T00:20:29,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54
2024-12-08T00:20:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=54
2024-12-08T00:20:29,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53
2024-12-08T00:20:29,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6030 sec
2024-12-08T00:20:29,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.6110 sec
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,581 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,584 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,587 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,591 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,595 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,600 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,607 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,610 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,616 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,620 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,626 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,632 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,636 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,641 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,645 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,649 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,652 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,655 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,658 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,661 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,667 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,670 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,675 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeated continuously between 2024-12-08T00:20:29,675 and 2024-12-08T00:20:29,754 by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 36703; duplicate entries collapsed ...] 2024-12-08T00:20:29,754 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,758 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,761 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,765 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,773 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,776 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,780 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,783 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,787 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:20:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,790 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,795 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208fce29592d5d944b0844d950319d8de4c_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_1/A:col10/1733617229787/Put/seqid=0 2024-12-08T00:20:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742064_1240 (size=12454) 2024-12-08T00:20:29,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,804 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,809 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208fce29592d5d944b0844d950319d8de4c_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208fce29592d5d944b0844d950319d8de4c_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,810 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2d86c0b631ed4c43b21eb58315e47e80, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:29,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2d86c0b631ed4c43b21eb58315e47e80 is 175, key is test_row_1/A:col10/1733617229787/Put/seqid=0 2024-12-08T00:20:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742065_1241 (size=31251) 2024-12-08T00:20:29,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=310, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2d86c0b631ed4c43b21eb58315e47e80 2024-12-08T00:20:29,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/96a69ca2169642e88a2554f78de9d0f0 is 50, key is test_row_1/B:col10/1733617229787/Put/seqid=0 2024-12-08T00:20:29,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742066_1242 (size=9857) 2024-12-08T00:20:29,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/96a69ca2169642e88a2554f78de9d0f0 2024-12-08T00:20:29,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d76fecdc497a4f5ab38bb418895a7f65 is 50, key is test_row_1/C:col10/1733617229787/Put/seqid=0 2024-12-08T00:20:29,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742067_1243 (size=9857) 2024-12-08T00:20:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617289890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617289891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617289891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617289892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617289894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617289995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617289996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617289996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617289996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:29,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617289997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T00:20:30,068 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-08T00:20:30,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:30,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-08T00:20:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T00:20:30,071 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:30,071 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:30,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:30,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=55 2024-12-08T00:20:30,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617290199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617290199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617290200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617290200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617290200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,224 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T00:20:30,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:30,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
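The repeated RegionTooBusyException warnings above are the region server refusing Mutate calls while region fabe935a14e4a2f5a6e3e15c47ba0977 sits over its memstore blocking limit (the "Over memstore limit=512.0 K" figure, i.e. the configured flush size times hbase.hregion.memstore.block.multiplier); callers are expected to back off until the in-flight flush drains the memstore. The stock HBase client already retries this internally, so the following is only a minimal sketch making that pattern explicit (relevant, say, when hbase.client.retries.number is set very low). The table, row, and family names are taken from the log; the backoff values are arbitrary illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_1"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                long backoffMs = 100;                  // illustrative starting backoff
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);                // rejected with RegionTooBusyException while the
                        break;                         // region's memstore is above its blocking limit
                    } catch (RegionTooBusyException busy) {
                        Thread.sleep(backoffMs);       // wait for the flush to drain the memstore
                        backoffMs *= 2;                // simple exponential backoff
                    }
                }
            }
        }
    }
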
2024-12-08T00:20:30,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:30,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:30,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d76fecdc497a4f5ab38bb418895a7f65 2024-12-08T00:20:30,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2d86c0b631ed4c43b21eb58315e47e80 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80 2024-12-08T00:20:30,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80, entries=150, sequenceid=310, filesize=30.5 K 2024-12-08T00:20:30,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/96a69ca2169642e88a2554f78de9d0f0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0 2024-12-08T00:20:30,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0, entries=100, sequenceid=310, filesize=9.6 K 2024-12-08T00:20:30,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d76fecdc497a4f5ab38bb418895a7f65 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65 2024-12-08T00:20:30,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65, entries=100, sequenceid=310, filesize=9.6 K 2024-12-08T00:20:30,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fabe935a14e4a2f5a6e3e15c47ba0977 in 527ms, sequenceid=310, compaction requested=true 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:30,316 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:30,316 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:30,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:30,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35057 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:30,318 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:30,318 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
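The compaction-selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy picks) are governed by the standard store-file compaction settings. A minimal sketch of those keys follows; the values shown are the usual upstream defaults, which happen to match the "3 eligible" and "16 blocking" figures in the log, and are not read from this test's own configuration. In practice these are server-side settings (hbase-site.xml, or the mini-cluster configuration in a test like this one); setting them on a client Configuration here only illustrates the key names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is considered;
            // the "3 eligible" selections above match this default of 3.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on how many files a single compaction may pick up.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Once a store reaches this many files, further writes are delayed until compaction
            // catches up; the policy lines report "16 blocking", i.e. this default.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            // Size ratio ExploringCompactionPolicy uses when weighing candidate file sets.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
        }
    }
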
2024-12-08T00:20:30,318 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8571267574bd49c9871e43ea5dee83ab, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=34.2 K 2024-12-08T00:20:30,319 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94466 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:30,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8571267574bd49c9871e43ea5dee83ab, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617225759 2024-12-08T00:20:30,319 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:30,319 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,319 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=92.3 K 2024-12-08T00:20:30,319 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,319 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80] 2024-12-08T00:20:30,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 50154135ce3b49418889ca8ec66fe813, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733617227547 2024-12-08T00:20:30,320 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d82ad209cde408bafe8a9d9391c1f36, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617225759 2024-12-08T00:20:30,321 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 96a69ca2169642e88a2554f78de9d0f0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617228663 2024-12-08T00:20:30,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f95c3a60a958461bb51c782273339764, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733617227547 2024-12-08T00:20:30,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d86c0b631ed4c43b21eb58315e47e80, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617228663 2024-12-08T00:20:30,331 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#204 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:30,332 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/fe1a5caa31e844b6bd0e7437631b1ea6 is 50, key is test_row_0/B:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:30,338 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,344 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208cf689009920a4c3680916f62810537f0_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,346 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208cf689009920a4c3680916f62810537f0_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,347 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cf689009920a4c3680916f62810537f0_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742068_1244 (size=13051) 2024-12-08T00:20:30,356 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/fe1a5caa31e844b6bd0e7437631b1ea6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/fe1a5caa31e844b6bd0e7437631b1ea6 2024-12-08T00:20:30,362 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into fe1a5caa31e844b6bd0e7437631b1ea6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
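The flush activity threaded through this log (Client=jenkins requesting "flush TestAcidGuarantees", the stored FlushTableProcedure pid=55, and the repeated "Checking to see if procedure is done pid=55" polling that continues below) corresponds to a plain Admin.flush call on the client side. A minimal sketch under the assumption that the cluster's configuration is on the classpath; the table name is the one from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table and waits for completion;
                // on this build the master runs it as the FlushTableProcedure seen in the log,
                // and the client polls procedure completion much like the pid=55 checks here.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
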
2024-12-08T00:20:30,362 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:30,362 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617230316; duration=0sec 2024-12-08T00:20:30,362 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:30,362 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:30,362 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742069_1245 (size=4469) 2024-12-08T00:20:30,364 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35057 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:30,364 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:30,364 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,364 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b7f258f78ddd42c491582fcc69c79183, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=34.2 K 2024-12-08T00:20:30,366 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b7f258f78ddd42c491582fcc69c79183, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733617225759 2024-12-08T00:20:30,367 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#205 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:30,367 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6041a372712a4c8aa781b8b489523d1e is 175, key is test_row_0/A:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:30,368 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a84cebd03ae4725b8547aaccec403d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733617227547 2024-12-08T00:20:30,369 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d76fecdc497a4f5ab38bb418895a7f65, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617228663 2024-12-08T00:20:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T00:20:30,377 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,378 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:30,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742070_1246 (size=32112) 2024-12-08T00:20:30,386 INFO [RS:0;017dd09fb407:36703-longCompactions-0 
{}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#206 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:30,386 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5b969c71c875494e99f1c679b3d7ba13 is 50, key is test_row_0/C:col10/1733617227548/Put/seqid=0 2024-12-08T00:20:30,390 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/6041a372712a4c8aa781b8b489523d1e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e 2024-12-08T00:20:30,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120844ef17b570f246b284e81b62bcb2d8e6_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617229891/Put/seqid=0 2024-12-08T00:20:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742071_1247 (size=13051) 2024-12-08T00:20:30,397 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 6041a372712a4c8aa781b8b489523d1e(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
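The compaction entries above show the region's stores each being rewritten from three store files into one, with the PressureAwareThroughputController capping compaction I/O at the 50.00 MB/second configured for this run. The same kind of compaction can also be requested explicitly from a client. The sketch below is illustrative only: it assumes an HBase 2.x client on the classpath, reuses the table name from this log, and the commented-out throughput key is an assumption about the relevant setting rather than something shown in this output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed throttle key; the 50 MB/s limit in this log comes from test configuration.
        // conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the region servers to major-compact every store of the table.
            admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

majorCompact returns once the request has been queued; the region servers then execute it on the same shortCompactions/longCompactions executors that appear in the thread names above.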
2024-12-08T00:20:30,397 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:30,397 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617230316; duration=0sec 2024-12-08T00:20:30,397 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:30,397 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:30,399 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5b969c71c875494e99f1c679b3d7ba13 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5b969c71c875494e99f1c679b3d7ba13 2024-12-08T00:20:30,406 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into 5b969c71c875494e99f1c679b3d7ba13(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:30,406 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:30,406 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617230316; duration=0sec 2024-12-08T00:20:30,406 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:30,406 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:30,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742072_1248 (size=12454) 2024-12-08T00:20:30,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,424 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120844ef17b570f246b284e81b62bcb2d8e6_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120844ef17b570f246b284e81b62bcb2d8e6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:30,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/cde9ede3f95b4779994aee82a725b18f, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/cde9ede3f95b4779994aee82a725b18f is 175, key is test_row_0/A:col10/1733617229891/Put/seqid=0 2024-12-08T00:20:30,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742073_1249 (size=31255) 2024-12-08T00:20:30,451 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/cde9ede3f95b4779994aee82a725b18f 2024-12-08T00:20:30,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/0380c3584cd948cc8bb435906fe933eb is 50, key is test_row_0/B:col10/1733617229891/Put/seqid=0 2024-12-08T00:20:30,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742074_1250 (size=12301) 2024-12-08T00:20:30,500 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/0380c3584cd948cc8bb435906fe933eb 2024-12-08T00:20:30,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:30,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:30,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/67ae085ea1044a3b97ef3e3ce39a35fd is 50, key is test_row_0/C:col10/1733617229891/Put/seqid=0 2024-12-08T00:20:30,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617290510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617290513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617290517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617290517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617290517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742075_1251 (size=12301) 2024-12-08T00:20:30,534 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/67ae085ea1044a3b97ef3e3ce39a35fd 2024-12-08T00:20:30,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/cde9ede3f95b4779994aee82a725b18f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f 2024-12-08T00:20:30,549 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f, entries=150, sequenceid=335, filesize=30.5 K 2024-12-08T00:20:30,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/0380c3584cd948cc8bb435906fe933eb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb 2024-12-08T00:20:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
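The repeated RegionTooBusyException warnings above show the region server rejecting writes while region fabe935a14e4a2f5a6e3e15c47ba0977 is over its memstore blocking limit, a deliberately small 512.0 K in this test; in a normal deployment that limit is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The stock client retries this exception internally, so it usually surfaces only after those retries are exhausted; the sketch below simply makes an explicit backoff-and-retry loop visible. The row, family and qualifier are taken from the log, while the value, attempt count and backoff are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                        // hypothetical starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                      // may fail while the region is blocked
                    break;                               // write accepted
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore blocking limit; wait for the
                    // in-flight flush (pid=56 above) to drain, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}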
2024-12-08T00:20:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,556 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb, entries=150, sequenceid=335, filesize=12.0 K 2024-12-08T00:20:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/67ae085ea1044a3b97ef3e3ce39a35fd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd 2024-12-08T00:20:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,559 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,565 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd, entries=150, sequenceid=335, filesize=12.0 K 2024-12-08T00:20:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,566 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for fabe935a14e4a2f5a6e3e15c47ba0977 in 188ms, sequenceid=335, compaction requested=false 2024-12-08T00:20:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:30,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-08T00:20:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-08T00:20:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-08T00:20:30,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 497 msec 2024-12-08T00:20:30,570 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 501 msec
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,601 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,604 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,606 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,609 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,612 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,615 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,619 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,625 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T00:20:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:30,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:30,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:30,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208667814061afa4714b0a2e4813bcb9172_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617290653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617290653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617290655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617290657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617290658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T00:20:30,674 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-08T00:20:30,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:30,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-08T00:20:30,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742076_1252 (size=14994) 2024-12-08T00:20:30,677 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:30,677 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:30,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:30,678 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:30,683 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208667814061afa4714b0a2e4813bcb9172_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208667814061afa4714b0a2e4813bcb9172_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:30,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/b05ee7e560bd44879f964bfe89d616e6, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:30,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/b05ee7e560bd44879f964bfe89d616e6 is 175, key is test_row_0/A:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:30,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742077_1253 (size=39949) 2024-12-08T00:20:30,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617290759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617290759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617290762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617290762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617290765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:30,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:30,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:30,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:30,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:30,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:30,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617290964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617290966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617290965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617290966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:30,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617290968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:30,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:30,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:30,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:30,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:30,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:30,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:30,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,092 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/b05ee7e560bd44879f964bfe89d616e6 2024-12-08T00:20:31,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/8449e52d37fb4c028d3caf1966268c62 is 50, key is test_row_0/B:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:31,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742078_1254 (size=12301) 2024-12-08T00:20:31,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/8449e52d37fb4c028d3caf1966268c62 2024-12-08T00:20:31,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/bc0ef8c104114203bb24f3708a0954b4 is 50, key is test_row_0/C:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:31,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742079_1255 (size=12301) 2024-12-08T00:20:31,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/bc0ef8c104114203bb24f3708a0954b4 2024-12-08T00:20:31,135 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:31,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:31,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/b05ee7e560bd44879f964bfe89d616e6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6 2024-12-08T00:20:31,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6, entries=200, sequenceid=353, filesize=39.0 K 2024-12-08T00:20:31,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/8449e52d37fb4c028d3caf1966268c62 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62 2024-12-08T00:20:31,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62, entries=150, sequenceid=353, filesize=12.0 K 2024-12-08T00:20:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/bc0ef8c104114203bb24f3708a0954b4 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4 2024-12-08T00:20:31,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4, entries=150, sequenceid=353, filesize=12.0 K 2024-12-08T00:20:31,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for fabe935a14e4a2f5a6e3e15c47ba0977 in 529ms, sequenceid=353, compaction requested=true 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:31,160 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:31,160 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:31,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:31,162 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103316 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:31,162 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:31,162 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:31,162 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:31,162 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=100.9 K 2024-12-08T00:20:31,162 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:31,162 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,162 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,162 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6] 2024-12-08T00:20:31,162 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/fe1a5caa31e844b6bd0e7437631b1ea6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=36.8 K 2024-12-08T00:20:31,163 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting fe1a5caa31e844b6bd0e7437631b1ea6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617227548 2024-12-08T00:20:31,163 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6041a372712a4c8aa781b8b489523d1e, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617227548 2024-12-08T00:20:31,163 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0380c3584cd948cc8bb435906fe933eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617229888 2024-12-08T00:20:31,163 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting cde9ede3f95b4779994aee82a725b18f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617229888 2024-12-08T00:20:31,164 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8449e52d37fb4c028d3caf1966268c62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733617230511 2024-12-08T00:20:31,164 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting b05ee7e560bd44879f964bfe89d616e6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733617230511 2024-12-08T00:20:31,172 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,181 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e2024120844531cc3c1d34596a58e5373a6685c36_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,182 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:31,183 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120844531cc3c1d34596a58e5373a6685c36_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,183 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/f81aa1c2c27841dab1cefe4eb6fea0b7 is 50, key is test_row_0/B:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:31,183 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120844531cc3c1d34596a58e5373a6685c36_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742081_1257 (size=4469) 2024-12-08T00:20:31,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742080_1256 (size=13153) 2024-12-08T00:20:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:31,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ba4bd69d9bb448f1b1103b22c5e053e7_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617231269/Put/seqid=0 2024-12-08T00:20:31,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:31,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742082_1258 (size=17534) 2024-12-08T00:20:31,286 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617291280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617291280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,288 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617291284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617291285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617291286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:31,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:31,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,291 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ba4bd69d9bb448f1b1103b22c5e053e7_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ba4bd69d9bb448f1b1103b22c5e053e7_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:31,293 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2bff0d4efc534bdfb543d376f20a7f9e, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2bff0d4efc534bdfb543d376f20a7f9e is 175, key is test_row_0/A:col10/1733617231269/Put/seqid=0 2024-12-08T00:20:31,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742083_1259 (size=48639) 2024-12-08T00:20:31,301 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2bff0d4efc534bdfb543d376f20a7f9e 2024-12-08T00:20:31,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3755985a1eec437cb79c29982a661f3b is 50, key is test_row_0/B:col10/1733617231269/Put/seqid=0 2024-12-08T00:20:31,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742084_1260 (size=12301) 2024-12-08T00:20:31,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3755985a1eec437cb79c29982a661f3b 2024-12-08T00:20:31,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/01eb76e42bc64b018eab5ab32e596916 is 50, key is test_row_0/C:col10/1733617231269/Put/seqid=0 2024-12-08T00:20:31,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742085_1261 (size=12301) 2024-12-08T00:20:31,328 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/01eb76e42bc64b018eab5ab32e596916 2024-12-08T00:20:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2bff0d4efc534bdfb543d376f20a7f9e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e 2024-12-08T00:20:31,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e, entries=250, sequenceid=376, filesize=47.5 K 2024-12-08T00:20:31,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3755985a1eec437cb79c29982a661f3b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b 2024-12-08T00:20:31,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b, entries=150, sequenceid=376, filesize=12.0 K 2024-12-08T00:20:31,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/01eb76e42bc64b018eab5ab32e596916 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916 2024-12-08T00:20:31,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916, entries=150, sequenceid=376, filesize=12.0 K 2024-12-08T00:20:31,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fabe935a14e4a2f5a6e3e15c47ba0977 in 82ms, sequenceid=376, compaction requested=true 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), 
splitQueue=0 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-08T00:20:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:31,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:31,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084238f060e4c84666adee277c08c0977b_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:31,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742086_1262 (size=14994) 2024-12-08T00:20:31,441 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:31,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617291441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617291442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617291442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617291447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617291447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617291548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617291548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617291548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617291553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617291571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,595 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:31,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,600 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#213 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:31,601 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/10b2501ca2aa4830b3a1cc305cf2f8ac is 175, key is test_row_0/A:col10/1733617230630/Put/seqid=0 2024-12-08T00:20:31,607 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/f81aa1c2c27841dab1cefe4eb6fea0b7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f81aa1c2c27841dab1cefe4eb6fea0b7 2024-12-08T00:20:31,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742087_1263 (size=32107) 2024-12-08T00:20:31,614 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into f81aa1c2c27841dab1cefe4eb6fea0b7(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:31,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:31,614 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617231160; duration=0sec 2024-12-08T00:20:31,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-08T00:20:31,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:31,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-08T00:20:31,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:20:31,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:20:31,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. because compaction request was cancelled 2024-12-08T00:20:31,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:31,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:31,616 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/10b2501ca2aa4830b3a1cc305cf2f8ac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac 2024-12-08T00:20:31,618 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:31,618 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:31,618 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:31,618 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5b969c71c875494e99f1c679b3d7ba13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=48.8 K 2024-12-08T00:20:31,619 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b969c71c875494e99f1c679b3d7ba13, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1733617227548 2024-12-08T00:20:31,619 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 67ae085ea1044a3b97ef3e3ce39a35fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617229888 2024-12-08T00:20:31,620 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bc0ef8c104114203bb24f3708a0954b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733617230511 2024-12-08T00:20:31,621 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 01eb76e42bc64b018eab5ab32e596916, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1733617230656 2024-12-08T00:20:31,622 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 10b2501ca2aa4830b3a1cc305cf2f8ac(size=31.4 K), total size for store is 78.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:31,623 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617231160; duration=0sec 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. because compaction request was cancelled 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:31,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T00:20:31,624 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:20:31,624 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:20:31,624 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
because compaction request was cancelled 2024-12-08T00:20:31,624 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:31,633 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:31,633 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d951ca2b6d194ca084b8ae3aab876bdf is 50, key is test_row_0/C:col10/1733617231269/Put/seqid=0 2024-12-08T00:20:31,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742088_1264 (size=13187) 2024-12-08T00:20:31,748 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:31,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:31,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617291750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617291750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617291751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617291775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617291777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:31,816 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:31,821 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084238f060e4c84666adee277c08c0977b_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084238f060e4c84666adee277c08c0977b_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:31,822 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0611c4c551fc4bb0bfba825b872a06d8, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:31,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0611c4c551fc4bb0bfba825b872a06d8 is 175, key is test_row_0/A:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:31,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742089_1265 (size=39949) 2024-12-08T00:20:31,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:31,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:31,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:31,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:31,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:31,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:31,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:31,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,052 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d951ca2b6d194ca084b8ae3aab876bdf as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d951ca2b6d194ca084b8ae3aab876bdf 2024-12-08T00:20:32,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617292052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617292053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617292054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,059 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into d951ca2b6d194ca084b8ae3aab876bdf(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:32,059 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:32,059 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=12, startTime=1733617231351; duration=0sec 2024-12-08T00:20:32,059 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:32,059 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:32,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617292080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617292080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,208 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,227 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=393, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0611c4c551fc4bb0bfba825b872a06d8 2024-12-08T00:20:32,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2a330898349242c79cd710f3a06988de is 50, key is test_row_0/B:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:32,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742090_1266 (size=12301) 2024-12-08T00:20:32,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617292555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617292559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617292562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617292582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:32,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617292585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,666 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2a330898349242c79cd710f3a06988de 2024-12-08T00:20:32,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4e9a755de86c43c394ea814f0ab54575 is 50, key is test_row_0/C:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:32,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742091_1267 (size=12301) 2024-12-08T00:20:32,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:32,821 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:32,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,973 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:32,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:32,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:33,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4e9a755de86c43c394ea814f0ab54575 2024-12-08T00:20:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/0611c4c551fc4bb0bfba825b872a06d8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8 2024-12-08T00:20:33,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:33,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:33,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:33,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:33,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:33,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8, entries=200, sequenceid=393, filesize=39.0 K 2024-12-08T00:20:33,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2a330898349242c79cd710f3a06988de as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de 2024-12-08T00:20:33,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de, entries=150, sequenceid=393, filesize=12.0 K 2024-12-08T00:20:33,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4e9a755de86c43c394ea814f0ab54575 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575 2024-12-08T00:20:33,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575, entries=150, sequenceid=393, filesize=12.0 K
2024-12-08T00:20:33,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for fabe935a14e4a2f5a6e3e15c47ba0977 in 1746ms, sequenceid=393, compaction requested=true 2024-12-08T00:20:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:33,140 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:33,141 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:33,141 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120695 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:33,141 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:33,141 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
2024-12-08T00:20:33,142 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=117.9 K 2024-12-08T00:20:33,142 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:33,142 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8] 2024-12-08T00:20:33,142 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10b2501ca2aa4830b3a1cc305cf2f8ac, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733617230511 2024-12-08T00:20:33,142 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:33,143 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:33,143 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bff0d4efc534bdfb543d376f20a7f9e, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1733617230656 2024-12-08T00:20:33,143 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:33,143 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f81aa1c2c27841dab1cefe4eb6fea0b7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=36.9 K 2024-12-08T00:20:33,143 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0611c4c551fc4bb0bfba825b872a06d8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733617231284 2024-12-08T00:20:33,144 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f81aa1c2c27841dab1cefe4eb6fea0b7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1733617230511 2024-12-08T00:20:33,144 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3755985a1eec437cb79c29982a661f3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1733617230656 2024-12-08T00:20:33,145 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a330898349242c79cd710f3a06988de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733617231393 2024-12-08T00:20:33,151 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:33,154 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-08T00:20:33,154 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/0f2c4a8e3c0b4f65a83d8961c62f9085 is 50, key is test_row_0/B:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:33,157 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412083caa0812e59542e5b836f91a87b88202_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:33,158 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412083caa0812e59542e5b836f91a87b88202_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:33,158 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083caa0812e59542e5b836f91a87b88202_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:33,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742092_1268 (size=13255) 2024-12-08T00:20:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742093_1269 (size=4469) 2024-12-08T00:20:33,164 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#222 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:33,164 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/89c0eca53c884a19ae46238738f8b435 is 175, key is test_row_0/A:col10/1733617231393/Put/seqid=0 2024-12-08T00:20:33,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742094_1270 (size=32209) 2024-12-08T00:20:33,280 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T00:20:33,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.
2024-12-08T00:20:33,281 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:33,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087fbbecbe862d41f78aa73a8eef3cacfe_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617231441/Put/seqid=0 2024-12-08T00:20:33,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742095_1271 (size=12454) 2024-12-08T00:20:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:33,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
as already flushing 2024-12-08T00:20:33,565 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/0f2c4a8e3c0b4f65a83d8961c62f9085 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0f2c4a8e3c0b4f65a83d8961c62f9085 2024-12-08T00:20:33,575 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/89c0eca53c884a19ae46238738f8b435 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435 2024-12-08T00:20:33,575 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 0f2c4a8e3c0b4f65a83d8961c62f9085(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:33,575 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:33,576 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617233140; duration=0sec 2024-12-08T00:20:33,576 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:33,576 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:33,576 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T00:20:33,579 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:20:33,579 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:20:33,579 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
because compaction request was cancelled 2024-12-08T00:20:33,579 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:33,583 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 89c0eca53c884a19ae46238738f8b435(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:33,583 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:33,583 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617233140; duration=0sec 2024-12-08T00:20:33,583 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:33,583 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:33,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617293583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617293584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617293586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617293590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617293591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617293687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617293687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617293688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:33,698 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087fbbecbe862d41f78aa73a8eef3cacfe_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087fbbecbe862d41f78aa73a8eef3cacfe_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:33,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:33,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc is 175, key is test_row_0/A:col10/1733617231441/Put/seqid=0 2024-12-08T00:20:33,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742096_1272 (size=31255) 2024-12-08T00:20:33,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617293889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617293889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:33,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:33,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617293892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,105 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=415, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc 2024-12-08T00:20:34,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3d92a1487b2b4368803e70ebd5b6bfa4 is 50, key is test_row_0/B:col10/1733617231441/Put/seqid=0 2024-12-08T00:20:34,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742097_1273 (size=12301) 2024-12-08T00:20:34,142 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3d92a1487b2b4368803e70ebd5b6bfa4 2024-12-08T00:20:34,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/31c8585a849b447389da080e25484cdc is 50, key is 
test_row_0/C:col10/1733617231441/Put/seqid=0 2024-12-08T00:20:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742098_1274 (size=12301) 2024-12-08T00:20:34,176 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/31c8585a849b447389da080e25484cdc 2024-12-08T00:20:34,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc 2024-12-08T00:20:34,184 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc, entries=150, sequenceid=415, filesize=30.5 K 2024-12-08T00:20:34,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/3d92a1487b2b4368803e70ebd5b6bfa4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4 2024-12-08T00:20:34,189 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4, entries=150, sequenceid=415, filesize=12.0 K 2024-12-08T00:20:34,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/31c8585a849b447389da080e25484cdc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc 2024-12-08T00:20:34,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617294192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617294193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,196 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc, entries=150, sequenceid=415, filesize=12.0 K 2024-12-08T00:20:34,196 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for fabe935a14e4a2f5a6e3e15c47ba0977 in 916ms, sequenceid=415, compaction requested=true 2024-12-08T00:20:34,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:34,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
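At this point the flush for pid=58 has finished, while the RPC handlers above and below keep rejecting Mutate calls with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit; that limit is normally the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, and the unusually small value suggests the test shrinks the flush size to exercise this path. The following is a minimal client-side sketch, assuming the standard HBase 2.x Table API, of a put against this table with explicit backoff when the region pushes back; in practice the HBase client already retries RegionTooBusyException internally and may surface it wrapped in a retries-exhausted exception, so this is a pattern sketch rather than a description of exact client behaviour.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the keys seen in the log; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // may fail while the memstore is over its blocking limit
          break;            // write accepted
        } catch (IOException busy) {
          // e.g. RegionTooBusyException: Over memstore limit=512.0 K
          // (possibly wrapped by the client's own retry machinery).
          if (attempt >= 5) {
            throw busy;     // give up after a few attempts
          }
          Thread.sleep(backoffMs); // back off while the flush drains the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}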
2024-12-08T00:20:34,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-08T00:20:34,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-08T00:20:34,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:34,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-08T00:20:34,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5210 sec 2024-12-08T00:20:34,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T00:20:34,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:34,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:34,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:34,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 3.5240 sec 2024-12-08T00:20:34,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120881f429573bf3495589fa56e93a451230_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742099_1275 (size=12454) 2024-12-08T00:20:34,214 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:34,217 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120881f429573bf3495589fa56e93a451230_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120881f429573bf3495589fa56e93a451230_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:34,218 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f9afbf351edf47ac84b32b9546069f10, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f9afbf351edf47ac84b32b9546069f10 is 175, key is test_row_0/A:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742100_1276 (size=31255) 2024-12-08T00:20:34,223 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f9afbf351edf47ac84b32b9546069f10 2024-12-08T00:20:34,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/4779ee347ae74001bc25982c35dffa4e is 50, key is test_row_0/B:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742101_1277 (size=12301) 2024-12-08T00:20:34,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617294242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617294344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617294565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/4779ee347ae74001bc25982c35dffa4e 2024-12-08T00:20:34,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/c17bd397a2f54bb5ac68b5a364e8ad05 is 50, key is test_row_0/C:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742102_1278 (size=12301) 2024-12-08T00:20:34,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/c17bd397a2f54bb5ac68b5a364e8ad05 2024-12-08T00:20:34,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/f9afbf351edf47ac84b32b9546069f10 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10 2024-12-08T00:20:34,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10, entries=150, sequenceid=435, filesize=30.5 K 2024-12-08T00:20:34,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/4779ee347ae74001bc25982c35dffa4e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e 2024-12-08T00:20:34,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e, entries=150, sequenceid=435, filesize=12.0 K 2024-12-08T00:20:34,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/c17bd397a2f54bb5ac68b5a364e8ad05 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05 2024-12-08T00:20:34,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617294694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05, entries=150, sequenceid=435, filesize=12.0 K 2024-12-08T00:20:34,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617294695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for fabe935a14e4a2f5a6e3e15c47ba0977 in 498ms, sequenceid=435, compaction requested=true 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:34,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:20:34,698 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:34,698 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:34,700 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:34,700 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:34,700 INFO 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:34,700 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d951ca2b6d194ca084b8ae3aab876bdf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=48.9 K 2024-12-08T00:20:34,700 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94719 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:34,700 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:34,700 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:34,701 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=92.5 K 2024-12-08T00:20:34,701 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:34,701 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10] 2024-12-08T00:20:34,701 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d951ca2b6d194ca084b8ae3aab876bdf, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1733617230656 2024-12-08T00:20:34,701 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89c0eca53c884a19ae46238738f8b435, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733617231393 2024-12-08T00:20:34,702 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e9a755de86c43c394ea814f0ab54575, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733617231393 2024-12-08T00:20:34,702 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b67613b3b1a44e9b7c64d0cc6bfe3bc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733617231439 2024-12-08T00:20:34,702 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9afbf351edf47ac84b32b9546069f10, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:34,702 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 31c8585a849b447389da080e25484cdc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733617231439 2024-12-08T00:20:34,703 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c17bd397a2f54bb5ac68b5a364e8ad05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:34,716 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,726 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:34,727 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5cfdd9a330814b7dac308249ab90bc15 is 50, key is test_row_0/C:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,736 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208df5cd205768b45128518023632d77f84_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,739 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208df5cd205768b45128518023632d77f84_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,739 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208df5cd205768b45128518023632d77f84_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742103_1279 (size=13323) 2024-12-08T00:20:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T00:20:34,784 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-08T00:20:34,786 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5cfdd9a330814b7dac308249ab90bc15 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5cfdd9a330814b7dac308249ab90bc15 2024-12-08T00:20:34,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:34,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-08T00:20:34,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:34,788 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:34,789 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:34,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:34,793 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into 5cfdd9a330814b7dac308249ab90bc15(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:34,793 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:34,793 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=12, startTime=1733617234698; duration=0sec 2024-12-08T00:20:34,793 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:34,793 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:34,793 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:34,794 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:34,795 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:34,795 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:34,795 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0f2c4a8e3c0b4f65a83d8961c62f9085, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=37.0 K 2024-12-08T00:20:34,795 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f2c4a8e3c0b4f65a83d8961c62f9085, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733617231393 2024-12-08T00:20:34,795 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d92a1487b2b4368803e70ebd5b6bfa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733617231439 2024-12-08T00:20:34,796 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4779ee347ae74001bc25982c35dffa4e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:34,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742104_1280 (size=4469) 2024-12-08T00:20:34,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#230 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:34,805 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/29408b8440094aaebd19bdf27dbc413f is 175, key is test_row_0/A:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,823 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#232 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:34,824 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/04af11801bd54e7d97c3fd6a6068ef27 is 50, key is test_row_0/B:col10/1733617233585/Put/seqid=0 2024-12-08T00:20:34,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742105_1281 (size=32311) 2024-12-08T00:20:34,848 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/29408b8440094aaebd19bdf27dbc413f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f 2024-12-08T00:20:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742106_1282 (size=13357) 2024-12-08T00:20:34,854 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 29408b8440094aaebd19bdf27dbc413f(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:34,855 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:34,855 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617234698; duration=0sec 2024-12-08T00:20:34,855 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:34,855 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:34,856 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/04af11801bd54e7d97c3fd6a6068ef27 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/04af11801bd54e7d97c3fd6a6068ef27 2024-12-08T00:20:34,862 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 04af11801bd54e7d97c3fd6a6068ef27(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:34,862 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:34,862 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617234698; duration=0sec 2024-12-08T00:20:34,862 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:34,862 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:34,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:34,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083304ce6e98cf4eb596add34867f3f1b7_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617234232/Put/seqid=0 2024-12-08T00:20:34,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742107_1283 (size=12454) 2024-12-08T00:20:34,888 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:34,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:34,893 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083304ce6e98cf4eb596add34867f3f1b7_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083304ce6e98cf4eb596add34867f3f1b7_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:34,894 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2959502627e143928dd258cb37a78605, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:34,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2959502627e143928dd258cb37a78605 is 175, key is test_row_0/A:col10/1733617234232/Put/seqid=0 2024-12-08T00:20:34,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742108_1284 (size=31255) 2024-12-08T00:20:34,901 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=457, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2959502627e143928dd258cb37a78605 2024-12-08T00:20:34,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:34,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617294902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/faa2665c3e14468e917b1fe19b33f4bc is 50, key is test_row_0/B:col10/1733617234232/Put/seqid=0 2024-12-08T00:20:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742109_1285 (size=12301) 2024-12-08T00:20:34,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:34,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T00:20:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:34,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:34,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:35,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617295004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:35,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T00:20:35,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:35,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:35,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:35,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:35,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:35,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617295207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T00:20:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:35,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:35,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:35,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:35,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/faa2665c3e14468e917b1fe19b33f4bc 2024-12-08T00:20:35,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/14693b018df74543a51c198a51ddfedd is 50, key is test_row_0/C:col10/1733617234232/Put/seqid=0 2024-12-08T00:20:35,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742110_1286 (size=12301) 2024-12-08T00:20:35,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/14693b018df74543a51c198a51ddfedd 2024-12-08T00:20:35,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/2959502627e143928dd258cb37a78605 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605 2024-12-08T00:20:35,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605, entries=150, sequenceid=457, filesize=30.5 K 2024-12-08T00:20:35,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/faa2665c3e14468e917b1fe19b33f4bc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc 2024-12-08T00:20:35,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc, entries=150, sequenceid=457, filesize=12.0 K 2024-12-08T00:20:35,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/14693b018df74543a51c198a51ddfedd as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd 2024-12-08T00:20:35,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd, entries=150, sequenceid=457, filesize=12.0 K 2024-12-08T00:20:35,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for fabe935a14e4a2f5a6e3e15c47ba0977 in 483ms, sequenceid=457, compaction requested=false 2024-12-08T00:20:35,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:35,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:35,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:35,402 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:35,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:35,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087ebe32e9aed2472d9bb65660370cc3e9_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617234895/Put/seqid=0 2024-12-08T00:20:35,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742111_1287 (size=12454) 2024-12-08T00:20:35,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:35,420 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087ebe32e9aed2472d9bb65660370cc3e9_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ebe32e9aed2472d9bb65660370cc3e9_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:35,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:35,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c is 175, key is test_row_0/A:col10/1733617234895/Put/seqid=0 2024-12-08T00:20:35,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742112_1288 (size=31255) 2024-12-08T00:20:35,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:35,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617295552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733617295607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,610 DEBUG [Thread-747 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:35,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57412 deadline: 1733617295608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,610 DEBUG [Thread-743 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:35,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617295654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617295701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617295703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,843 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=474, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c 2024-12-08T00:20:35,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/91ce2d5592844bf8953fcc34c1dc5a40 is 50, key is test_row_0/B:col10/1733617234895/Put/seqid=0 2024-12-08T00:20:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742113_1289 (size=12301) 2024-12-08T00:20:35,857 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/91ce2d5592844bf8953fcc34c1dc5a40 2024-12-08T00:20:35,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:35,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617295857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:35,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2d1098b61f0644caa3a7b8391d900f63 is 50, key is test_row_0/C:col10/1733617234895/Put/seqid=0 2024-12-08T00:20:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:35,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742114_1290 (size=12301) 2024-12-08T00:20:35,899 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2d1098b61f0644caa3a7b8391d900f63 2024-12-08T00:20:35,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c 2024-12-08T00:20:35,910 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c, entries=150, sequenceid=474, filesize=30.5 K 2024-12-08T00:20:35,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/91ce2d5592844bf8953fcc34c1dc5a40 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40 2024-12-08T00:20:35,917 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40, entries=150, sequenceid=474, filesize=12.0 K 2024-12-08T00:20:35,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/2d1098b61f0644caa3a7b8391d900f63 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63 2024-12-08T00:20:35,925 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63, entries=150, sequenceid=474, filesize=12.0 K 2024-12-08T00:20:35,927 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for fabe935a14e4a2f5a6e3e15c47ba0977 in 525ms, sequenceid=474, compaction requested=true 2024-12-08T00:20:35,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:35,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
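The entries above record FlushRegionProcedure pid=60 committing the .tmp store files for families A, B and C and finishing the flush at sequenceid=474; the flush itself was requested by a client-side FLUSH on TestAcidGuarantees. As a minimal sketch only (assuming a reachable cluster whose hbase-site.xml is on the classpath, and reusing the table name from this log), issuing such a flush from the HBase client Admin API looks roughly like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; connection details are illustrative.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; on the server side this
      // surfaces as FlushTableProcedure/FlushRegionProcedure entries like pid=59/60 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```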
2024-12-08T00:20:35,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-08T00:20:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-08T00:20:35,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-08T00:20:35,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1410 sec 2024-12-08T00:20:35,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.1450 sec 2024-12-08T00:20:36,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:36,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T00:20:36,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:36,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:36,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:36,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:36,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:36,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:36,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120818b564c2abe9403194f66af28ce57a4e_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:36,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742115_1291 (size=14994) 2024-12-08T00:20:36,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617296221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:36,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:36,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617296326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:36,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617296530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:36,595 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:36,605 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120818b564c2abe9403194f66af28ce57a4e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120818b564c2abe9403194f66af28ce57a4e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:36,606 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/d69e454259df4d69ad1941b75dcd7abc, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:36,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/d69e454259df4d69ad1941b75dcd7abc is 175, key is test_row_0/A:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742116_1292 (size=39949) 2024-12-08T00:20:36,616 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=494, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/d69e454259df4d69ad1941b75dcd7abc 2024-12-08T00:20:36,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/969c690033244d7c91225893a404728f is 50, key is test_row_0/B:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:36,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742117_1293 (size=12301) 2024-12-08T00:20:36,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617296833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:36,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T00:20:36,892 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-08T00:20:36,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-08T00:20:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:36,895 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:36,895 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:36,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:37,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/969c690033244d7c91225893a404728f 2024-12-08T00:20:37,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T00:20:37,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:37,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:37,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:37,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
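The RpcRetryingCallerImpl entries in this section ("tries=6, retries=16") show the AcidGuaranteesTestTool writer threads retrying puts that the region server rejects with RegionTooBusyException while the memstore is over its limit. A minimal client-side sketch of such a writer, assuming the row/family/qualifier layout visible in the log (rows test_row_*, family A, qualifier col10); the retry and pause values are illustrative, though the property names are standard HBase client settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry settings; RegionTooBusyException is retried internally by the
    // client, which is what produces the "tries=6, retries=16" lines above.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // milliseconds between retries
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // throws IOException only once the configured retries are exhausted
    }
  }
}
```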
2024-12-08T00:20:37,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:37,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:37,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4d95852daab841c8b96bb2af6df904e8 is 50, key is test_row_0/C:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:37,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742118_1294 (size=12301) 2024-12-08T00:20:37,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4d95852daab841c8b96bb2af6df904e8 2024-12-08T00:20:37,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/d69e454259df4d69ad1941b75dcd7abc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc 2024-12-08T00:20:37,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc, entries=200, sequenceid=494, filesize=39.0 K 2024-12-08T00:20:37,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/969c690033244d7c91225893a404728f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f 2024-12-08T00:20:37,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f, entries=150, sequenceid=494, filesize=12.0 K 2024-12-08T00:20:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/4d95852daab841c8b96bb2af6df904e8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8 2024-12-08T00:20:37,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8, entries=150, sequenceid=494, filesize=12.0 K 
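At this point the memstore flush has committed one new store file per column family (A, B and C) at sequenceid=494. The writer threads producing that data appear further down as AcidGuaranteesTestTool$AtomicityWriter frames calling HTable.put; a rough sketch of that kind of write follows, with the qualifier and value as placeholders and only the row/family layout (test_row_0, families A/B/C, column col10) taken from the log.

// Rough sketch of the kind of client write the AtomicityWriter threads issue:
// a single Put covering all three column families of one row. Qualifier and
// value are placeholders chosen to echo the keys visible in the log
// (e.g. test_row_0/A:col10).
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class AtomicRowWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = Bytes.toBytes("some-opaque-payload");
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);  // applied atomically by the single region hosting the row
    }
  }
}

Because all three families live in the same row and region, the mutation is applied atomically, which is the row-level guarantee the TestAcidGuarantees reader threads verify.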
2024-12-08T00:20:37,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for fabe935a14e4a2f5a6e3e15c47ba0977 in 914ms, sequenceid=494, compaction requested=true 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:37,078 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:37,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:37,078 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:37,079 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134770 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:37,080 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:37,080 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:37,080 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:37,080 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:37,080 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:37,080 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=131.6 K 2024-12-08T00:20:37,080 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:37,080 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/04af11801bd54e7d97c3fd6a6068ef27, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=49.1 K 2024-12-08T00:20:37,080 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc] 2024-12-08T00:20:37,080 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 04af11801bd54e7d97c3fd6a6068ef27, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:37,081 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29408b8440094aaebd19bdf27dbc413f, keycount=150, bloomtype=ROW, size=31.6 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:37,081 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting faa2665c3e14468e917b1fe19b33f4bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733617234232 2024-12-08T00:20:37,081 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2959502627e143928dd258cb37a78605, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733617234232 2024-12-08T00:20:37,081 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 91ce2d5592844bf8953fcc34c1dc5a40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1733617234891 2024-12-08T00:20:37,082 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfc30ad3f20d4ec1ba5b04f67aa9fe5c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1733617234891 2024-12-08T00:20:37,082 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 969c690033244d7c91225893a404728f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235547 2024-12-08T00:20:37,082 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d69e454259df4d69ad1941b75dcd7abc, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235545 2024-12-08T00:20:37,091 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:37,093 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#242 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:37,093 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2d98dc46575f40df835cb93aade2cbb7 is 50, key is test_row_0/B:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:37,096 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208bfdff2e4e9464749abb6602a65a632e6_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:37,098 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208bfdff2e4e9464749abb6602a65a632e6_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:37,098 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bfdff2e4e9464749abb6602a65a632e6_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:37,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742119_1295 (size=13493) 2024-12-08T00:20:37,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742120_1296 (size=4469) 2024-12-08T00:20:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:37,200 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T00:20:37,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
2024-12-08T00:20:37,202 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:37,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:37,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080a5e5dbde5c14165b85f1268a0d66998_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617236196/Put/seqid=0 2024-12-08T00:20:37,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742121_1297 (size=12454) 2024-12-08T00:20:37,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:37,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. as already flushing 2024-12-08T00:20:37,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617297376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,419 DEBUG [Thread-754 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:62287 2024-12-08T00:20:37,419 DEBUG [Thread-754 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:37,420 DEBUG [Thread-760 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:62287 2024-12-08T00:20:37,420 DEBUG [Thread-760 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:37,421 DEBUG [Thread-758 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:62287 2024-12-08T00:20:37,421 DEBUG [Thread-758 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:37,422 DEBUG [Thread-756 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:62287 2024-12-08T00:20:37,422 DEBUG [Thread-756 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:37,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617297478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:37,510 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2d98dc46575f40df835cb93aade2cbb7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2d98dc46575f40df835cb93aade2cbb7 2024-12-08T00:20:37,514 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 2d98dc46575f40df835cb93aade2cbb7(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
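The RegionTooBusyException warnings in this stretch are the region rejecting Mutate calls once its memstore passes the blocking limit (512.0 K in this run, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), and the RpcRetryingCallerImpl lines are the writer threads backing off and retrying (tries=6 of retries=16). A minimal client-side sketch follows; the retry values simply mirror the log, and the class name and payload are invented for illustration.

// Minimal sketch, assumptions noted in comments: widening the client retry
// budget for writes against a hot region that answers with
// RegionTooBusyException while its memstore is above the blocking limit.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetrySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);  // retry count, mirroring retries=16 in the log
    conf.setLong("hbase.client.pause", 100);         // base backoff in milliseconds between retries
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      try {
        table.put(put);  // retried internally with backoff by the RpcRetryingCaller
      } catch (IOException e) {
        // Surfaces once the retries are exhausted; the caller can back off further.
        System.err.println("put gave up after retries: " + e.getMessage());
      }
    }
  }
}

Widening the retry budget only rides out the pressure; it is the flushes and compactions recorded around these warnings that bring the memstore back under the limit.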
2024-12-08T00:20:37,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:37,514 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=12, startTime=1733617237078; duration=0sec 2024-12-08T00:20:37,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:37,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:37,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:37,515 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:37,515 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:37,515 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:37,516 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5cfdd9a330814b7dac308249ab90bc15, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=49.0 K 2024-12-08T00:20:37,516 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cfdd9a330814b7dac308249ab90bc15, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733617233577 2024-12-08T00:20:37,516 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 14693b018df74543a51c198a51ddfedd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733617234232 2024-12-08T00:20:37,517 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d1098b61f0644caa3a7b8391d900f63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=474, earliestPutTs=1733617234891 2024-12-08T00:20:37,517 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d95852daab841c8b96bb2af6df904e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235547 2024-12-08T00:20:37,520 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#243 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:37,521 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/93c365e172144a0caba9eab60b9f8e29 is 175, key is test_row_0/A:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:37,530 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#245 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742122_1298 (size=32447) 2024-12-08T00:20:37,531 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d8a52d4b00d74768abccc60fc271a49d is 50, key is test_row_0/C:col10/1733617235547/Put/seqid=0 2024-12-08T00:20:37,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742123_1299 (size=13459) 2024-12-08T00:20:37,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:37,619 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080a5e5dbde5c14165b85f1268a0d66998_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080a5e5dbde5c14165b85f1268a0d66998_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:37,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/410ec4085924444d994037da7cda9f6c, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:37,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 
{event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/410ec4085924444d994037da7cda9f6c is 175, key is test_row_0/A:col10/1733617236196/Put/seqid=0 2024-12-08T00:20:37,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742124_1300 (size=31255) 2024-12-08T00:20:37,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617297679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,705 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:20:37,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57400 deadline: 1733617297717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,718 DEBUG [Thread-745 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:37,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57452 deadline: 1733617297718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,719 DEBUG [Thread-751 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:37,936 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/93c365e172144a0caba9eab60b9f8e29 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29 2024-12-08T00:20:37,940 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/d8a52d4b00d74768abccc60fc271a49d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d8a52d4b00d74768abccc60fc271a49d 2024-12-08T00:20:37,941 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 93c365e172144a0caba9eab60b9f8e29(size=31.7 K), total size for store is 31.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:37,941 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:37,941 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=12, startTime=1733617237078; duration=0sec 2024-12-08T00:20:37,941 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:37,941 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:37,944 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into d8a52d4b00d74768abccc60fc271a49d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
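Each compaction above selected all four eligible store files of a family (with 16 files as the blocking threshold) and rewrote them into a single file per store. A short sketch follows that names the store-file settings involved and shows an explicit major-compaction request through the Admin API; the settings are listed only as comments because they are server-side configuration, and the class name is illustrative.

// Sketch only: the store-file settings behind the "4 eligible, 16 blocking"
// selection lines above, plus a manual major-compaction request. The three
// properties belong in the region servers' hbase-site.xml; setting them on a
// client Configuration would not change the cluster, so they are comments here.
//
//   hbase.hstore.compaction.min      - minimum store files before a minor compaction is considered
//   hbase.hstore.compaction.max      - upper bound on files rewritten in one compaction
//   hbase.hstore.blockingStoreFiles  - updates block once a store holds this many files (16 here)
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class CompactionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Queue a major compaction of every store of the table; the region
      // servers execute it asynchronously on their CompactSplit threads,
      // as seen in the shortCompactions/longCompactions entries above.
      admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}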
2024-12-08T00:20:37,944 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:37,944 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=12, startTime=1733617237078; duration=0sec 2024-12-08T00:20:37,944 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:37,944 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:37,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617297982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:38,026 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=510, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/410ec4085924444d994037da7cda9f6c 2024-12-08T00:20:38,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/effe634907e947c3a0e87fae0c9a2ff1 is 50, key is test_row_0/B:col10/1733617236196/Put/seqid=0 2024-12-08T00:20:38,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742125_1301 (size=12301) 2024-12-08T00:20:38,436 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/effe634907e947c3a0e87fae0c9a2ff1 2024-12-08T00:20:38,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 is 50, key is test_row_0/C:col10/1733617236196/Put/seqid=0 2024-12-08T00:20:38,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742126_1302 (size=12301) 2024-12-08T00:20:38,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57468 deadline: 1733617298488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:38,846 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 2024-12-08T00:20:38,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/410ec4085924444d994037da7cda9f6c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c 2024-12-08T00:20:38,854 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c, entries=150, sequenceid=510, filesize=30.5 K 2024-12-08T00:20:38,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/effe634907e947c3a0e87fae0c9a2ff1 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1 2024-12-08T00:20:38,858 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1, entries=150, sequenceid=510, filesize=12.0 K 2024-12-08T00:20:38,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 2024-12-08T00:20:38,862 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716, entries=150, sequenceid=510, filesize=12.0 K 2024-12-08T00:20:38,863 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for fabe935a14e4a2f5a6e3e15c47ba0977 in 1662ms, sequenceid=510, compaction requested=false 2024-12-08T00:20:38,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:38,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
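The RegionTooBusyException entries above are write backpressure: the region server rejects new mutations once the region's memstore exceeds its blocking limit (roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; this test configures it down to 512 K), and the condition clears once the flush recorded in the log completes. The HBase client normally retries these calls on its own; the sketch below is a hypothetical writer that backs off explicitly, assuming the busy signal may surface either directly or wrapped by the retrying caller. It is not part of the test tool.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;                                   // write accepted
        } catch (IOException ioe) {
          // Depending on client retry settings the busy signal may arrive
          // directly or wrapped; anything else is a real failure.
          boolean busy = ioe instanceof RegionTooBusyException
              || ioe.getCause() instanceof RegionTooBusyException;
          if (!busy) {
            throw ioe;
          }
          Thread.sleep(backoffMs);                  // wait for the flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}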
2024-12-08T00:20:38,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-08T00:20:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-08T00:20:38,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-08T00:20:38,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9700 sec 2024-12-08T00:20:38,867 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.9730 sec 2024-12-08T00:20:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T00:20:38,999 INFO [Thread-753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-08T00:20:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:39,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T00:20:39,492 DEBUG [Thread-749 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:62287 2024-12-08T00:20:39,492 DEBUG [Thread-749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:39,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:39,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089b1233955b0c47c582e9c8126e0a6a95_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:39,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742127_1303 (size=12454) 2024-12-08T00:20:39,631 DEBUG [Thread-747 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:62287 2024-12-08T00:20:39,631 DEBUG [Thread-747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:39,640 DEBUG [Thread-743 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:62287 2024-12-08T00:20:39,641 DEBUG [Thread-743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:39,904 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:39,908 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089b1233955b0c47c582e9c8126e0a6a95_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089b1233955b0c47c582e9c8126e0a6a95_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:39,908 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/170c828848854c6397e57dc741de5463, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:39,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/170c828848854c6397e57dc741de5463 is 175, key is test_row_0/A:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:39,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742128_1304 (size=31255) 2024-12-08T00:20:40,313 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=534, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/170c828848854c6397e57dc741de5463 2024-12-08T00:20:40,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 is 50, key is test_row_0/B:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:40,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742129_1305 (size=12301) 2024-12-08T00:20:40,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=534 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 2024-12-08T00:20:40,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/6bcb4fed04744125828520d533cb3685 is 50, key is test_row_0/C:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:40,733 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742130_1306 (size=12301) 2024-12-08T00:20:41,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=534 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/6bcb4fed04744125828520d533cb3685 2024-12-08T00:20:41,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/170c828848854c6397e57dc741de5463 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463 2024-12-08T00:20:41,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463, entries=150, sequenceid=534, filesize=30.5 K 2024-12-08T00:20:41,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 2024-12-08T00:20:41,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9, entries=150, sequenceid=534, filesize=12.0 K 2024-12-08T00:20:41,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/6bcb4fed04744125828520d533cb3685 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685 2024-12-08T00:20:41,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685, entries=150, sequenceid=534, filesize=12.0 K 2024-12-08T00:20:41,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=13.42 KB/13740 for fabe935a14e4a2f5a6e3e15c47ba0977 in 1659ms, sequenceid=534, compaction requested=true 2024-12-08T00:20:41,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:41,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:41,151 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:41,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:41,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:41,151 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:41,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fabe935a14e4a2f5a6e3e15c47ba0977:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:41,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:41,152 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:41,152 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:41,152 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:41,152 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/A is initiating minor compaction (all files) 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/B is initiating minor compaction (all files) 2024-12-08T00:20:41,153 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/A in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:41,153 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/B in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
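"Exploring compaction algorithm has selected 3 files of size 94957 ... with 1 in ratio" refers to the exploring policy's ratio test: a candidate set qualifies when no file is larger than the compaction ratio times the combined size of the other files in the set. The following is a simplified, standalone illustration of that test, not the actual ExploringCompactionPolicy code; 1.2 is the default hbase.hstore.compaction.ratio, and the individual file sizes are an illustrative split of the 94957-byte total from the log.

import java.util.List;

public class FilesInRatioSketch {
  // True when every file is at most ratio * (sum of the other files) --
  // the shape of the check behind the "N in ratio" log message.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<Long> sizes = List.of(32447L, 31255L, 31255L); // illustrative split of the 94957-byte selection
    System.out.println(filesInRatio(sizes, 1.2));       // true: the whole set is eligible
  }
}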
2024-12-08T00:20:41,153 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=92.7 K 2024-12-08T00:20:41,153 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:41,153 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2d98dc46575f40df835cb93aade2cbb7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=37.2 K 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463] 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93c365e172144a0caba9eab60b9f8e29, keycount=150, bloomtype=ROW, size=31.7 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235547 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d98dc46575f40df835cb93aade2cbb7, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235547 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 410ec4085924444d994037da7cda9f6c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733617236174 2024-12-08T00:20:41,153 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting effe634907e947c3a0e87fae0c9a2ff1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733617236174 2024-12-08T00:20:41,154 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 170c828848854c6397e57dc741de5463, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=534, earliestPutTs=1733617237373 2024-12-08T00:20:41,154 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c9ae9d9f8bad4f0284a8f0d12e0c5aa9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=534, earliestPutTs=1733617237373 2024-12-08T00:20:41,163 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#B#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:41,164 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2428b81a94cb4c8581fce4951fc97507 is 50, key is test_row_0/B:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:41,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742131_1307 (size=13595) 2024-12-08T00:20:41,171 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:41,173 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208f28d147a53c2430eb9c24a267776eebd_fabe935a14e4a2f5a6e3e15c47ba0977 store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:41,203 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208f28d147a53c2430eb9c24a267776eebd_fabe935a14e4a2f5a6e3e15c47ba0977, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:41,204 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f28d147a53c2430eb9c24a267776eebd_fabe935a14e4a2f5a6e3e15c47ba0977 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742132_1308 (size=4469) 2024-12-08T00:20:41,209 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#A#compaction#252 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:41,210 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/48edf2dd29514d05bc2c0cb26b23d2b8 is 175, key is test_row_0/A:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:41,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742133_1309 (size=32549) 2024-12-08T00:20:41,220 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/48edf2dd29514d05bc2c0cb26b23d2b8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/48edf2dd29514d05bc2c0cb26b23d2b8 2024-12-08T00:20:41,225 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/A of fabe935a14e4a2f5a6e3e15c47ba0977 into 48edf2dd29514d05bc2c0cb26b23d2b8(size=31.8 K), total size for store is 31.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:41,225 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:41,225 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/A, priority=13, startTime=1733617241151; duration=0sec 2024-12-08T00:20:41,225 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:41,225 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:A 2024-12-08T00:20:41,225 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:41,226 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:41,226 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): fabe935a14e4a2f5a6e3e15c47ba0977/C is initiating minor compaction (all files) 2024-12-08T00:20:41,226 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fabe935a14e4a2f5a6e3e15c47ba0977/C in TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
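The HMobStore, DefaultMobStoreFlusher, and DefaultMobStoreCompactor entries in this run come from a MOB-enabled column family: cells above a size threshold are written to separate MOB files under /mobdir, and the compactor above aborts its MOB writer when the selection contains no MOB cells. Below is a hedged sketch of declaring such a schema with the HBase 2.x admin API; only family A is marked MOB-enabled, matching the mobdir paths in the log, and the 100-byte threshold is an arbitrary example rather than the test's actual setting.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)        // large cells are written to MOB files under /mobdir
          .setMobThreshold(100L)      // example threshold in bytes (assumption)
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}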
2024-12-08T00:20:41,226 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d8a52d4b00d74768abccc60fc271a49d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp, totalSize=37.2 K 2024-12-08T00:20:41,227 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8a52d4b00d74768abccc60fc271a49d, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1733617235547 2024-12-08T00:20:41,227 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43f1d6f0c2c9440f8f6a7ec82bf0d716, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1733617236174 2024-12-08T00:20:41,227 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6bcb4fed04744125828520d533cb3685, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=534, earliestPutTs=1733617237373 2024-12-08T00:20:41,245 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fabe935a14e4a2f5a6e3e15c47ba0977#C#compaction#253 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:41,245 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/de4215a483994ef5b9914a80e6663ed2 is 50, key is test_row_0/C:col10/1733617237374/Put/seqid=0 2024-12-08T00:20:41,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742134_1310 (size=13561) 2024-12-08T00:20:41,575 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/2428b81a94cb4c8581fce4951fc97507 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2428b81a94cb4c8581fce4951fc97507 2024-12-08T00:20:41,580 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/B of fabe935a14e4a2f5a6e3e15c47ba0977 into 2428b81a94cb4c8581fce4951fc97507(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:41,580 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:41,580 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/B, priority=13, startTime=1733617241151; duration=0sec 2024-12-08T00:20:41,580 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:41,580 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:B 2024-12-08T00:20:41,655 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/de4215a483994ef5b9914a80e6663ed2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/de4215a483994ef5b9914a80e6663ed2 2024-12-08T00:20:41,659 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fabe935a14e4a2f5a6e3e15c47ba0977/C of fabe935a14e4a2f5a6e3e15c47ba0977 into de4215a483994ef5b9914a80e6663ed2(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:41,659 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:41,659 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977., storeName=fabe935a14e4a2f5a6e3e15c47ba0977/C, priority=13, startTime=1733617241151; duration=0sec 2024-12-08T00:20:41,660 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:41,660 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fabe935a14e4a2f5a6e3e15c47ba0977:C 2024-12-08T00:20:41,742 DEBUG [Thread-745 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:62287 2024-12-08T00:20:41,742 DEBUG [Thread-745 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:41,758 DEBUG [Thread-751 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:62287 2024-12-08T00:20:41,758 DEBUG [Thread-751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 136 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5540 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5369 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2388 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7164 rows 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2405 2024-12-08T00:20:41,759 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7215 rows 2024-12-08T00:20:41,759 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:20:41,759 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a9b9802 to 127.0.0.1:62287 2024-12-08T00:20:41,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:20:41,761 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T00:20:41,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T00:20:41,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:41,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:41,768 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617241767"}]},"ts":"1733617241767"} 2024-12-08T00:20:41,769 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T00:20:41,771 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T00:20:41,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:20:41,773 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, UNASSIGN}] 2024-12-08T00:20:41,773 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, UNASSIGN 2024-12-08T00:20:41,774 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:41,775 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:20:41,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:41,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:41,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:41,927 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing fabe935a14e4a2f5a6e3e15c47ba0977, disabling compactions & flushes 2024-12-08T00:20:41,927 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. after waiting 0 ms 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 
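The pid=63..66 chain above (DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure UNASSIGN, CloseRegionProcedure) is the master-side work behind a single client call, and the repeated "Checking to see if procedure is done pid=63" entries are the client polling that procedure. A minimal sketch of the client side with the standard Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure (and the per-region
        // unassign/close subprocedures seen in the log) have finished.
        admin.disableTable(table);
      }
      System.out.println("disabled=" + admin.isTableDisabled(table));
    }
  }
}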
2024-12-08T00:20:41,927 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing fabe935a14e4a2f5a6e3e15c47ba0977 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=A 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=B 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fabe935a14e4a2f5a6e3e15c47ba0977, store=C 2024-12-08T00:20:41,927 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:41,934 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120835a6272fc90d44e3b133a6c0b5c491c6_fabe935a14e4a2f5a6e3e15c47ba0977 is 50, key is test_row_0/A:col10/1733617239639/Put/seqid=0 2024-12-08T00:20:41,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742135_1311 (size=12454) 2024-12-08T00:20:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:42,125 DEBUG [master/017dd09fb407:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4a133571fbb9d65d8cbb8c5be599e94a changed from -1.0 to 0.0, refreshing cache 2024-12-08T00:20:42,338 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:42,341 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120835a6272fc90d44e3b133a6c0b5c491c6_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835a6272fc90d44e3b133a6c0b5c491c6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:42,342 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/36f27158ad2b4ccaa274133c699422f9, store: [table=TestAcidGuarantees family=A region=fabe935a14e4a2f5a6e3e15c47ba0977] 2024-12-08T00:20:42,343 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/36f27158ad2b4ccaa274133c699422f9 is 175, key is test_row_0/A:col10/1733617239639/Put/seqid=0 2024-12-08T00:20:42,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742136_1312 (size=31255) 2024-12-08T00:20:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:42,747 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=544, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/36f27158ad2b4ccaa274133c699422f9 2024-12-08T00:20:42,754 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/b0e74ee573f443a3a389acbba7d0783e is 50, key is test_row_0/B:col10/1733617239639/Put/seqid=0 2024-12-08T00:20:42,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742137_1313 (size=12301) 2024-12-08T00:20:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:43,158 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/b0e74ee573f443a3a389acbba7d0783e 2024-12-08T00:20:43,165 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e92a483893b43f1a94c77a71044f8c4 is 50, key is test_row_0/C:col10/1733617239639/Put/seqid=0 2024-12-08T00:20:43,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742138_1314 (size=12301) 2024-12-08T00:20:43,568 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=544 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e92a483893b43f1a94c77a71044f8c4 2024-12-08T00:20:43,573 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/A/36f27158ad2b4ccaa274133c699422f9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/36f27158ad2b4ccaa274133c699422f9 2024-12-08T00:20:43,576 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/36f27158ad2b4ccaa274133c699422f9, entries=150, sequenceid=544, filesize=30.5 K 2024-12-08T00:20:43,577 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/B/b0e74ee573f443a3a389acbba7d0783e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/b0e74ee573f443a3a389acbba7d0783e 2024-12-08T00:20:43,580 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/b0e74ee573f443a3a389acbba7d0783e, entries=150, sequenceid=544, filesize=12.0 K 2024-12-08T00:20:43,581 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/.tmp/C/5e92a483893b43f1a94c77a71044f8c4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e92a483893b43f1a94c77a71044f8c4 2024-12-08T00:20:43,584 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e92a483893b43f1a94c77a71044f8c4, entries=150, sequenceid=544, filesize=12.0 K 2024-12-08T00:20:43,584 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for fabe935a14e4a2f5a6e3e15c47ba0977 in 1657ms, sequenceid=544, compaction requested=false 2024-12-08T00:20:43,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463] to archive 2024-12-08T00:20:43,586 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:20:43,587 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0bc9bbd7f430460db7a7df82eacc89fa 2024-12-08T00:20:43,589 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/ebc5a73dfac04f1991f1efab1d486dd1 2024-12-08T00:20:43,590 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/45622afb47a6460d97e22e5418e98f12 2024-12-08T00:20:43,591 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/a3073b49c56c435a8625ee96bf1c2b61 2024-12-08T00:20:43,592 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/46bd0440d10e4de0b2b37846157090d7 2024-12-08T00:20:43,593 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/c544c04ef7374d18b6dc5ad2b401233f 2024-12-08T00:20:43,594 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/52299a07be464cb4b183f367682d007f 2024-12-08T00:20:43,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/75bd4d745910457fa58200dac6cad25a 2024-12-08T00:20:43,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/05e48a06a47f4720b0528e6ab3d57069 2024-12-08T00:20:43,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f3dababedc2847a49e517817ca5c85f9 2024-12-08T00:20:43,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6d2e6fa668374eb0b000871328b5f833 2024-12-08T00:20:43,599 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/99bf666bcefb4ed69a4e9e0d03a9dba6 2024-12-08T00:20:43,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/1b16a35cc0134a62b25053b7d595c871 2024-12-08T00:20:43,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/e0d73d7bf4d546e8b73c56e203074339 2024-12-08T00:20:43,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f6e1780ff5ef4bca99f38f179b678194 2024-12-08T00:20:43,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f0c976a676894ae0acbaad490e6af01b 2024-12-08T00:20:43,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/86e06f8fee344023b196c9631c68c9ad 2024-12-08T00:20:43,605 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/9326fa16abb642748ad49000dd7348ea 2024-12-08T00:20:43,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/4d82ad209cde408bafe8a9d9391c1f36 2024-12-08T00:20:43,607 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/7f9247fe2f7c4aa0b792ef3b3a65bf75 2024-12-08T00:20:43,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f95c3a60a958461bb51c782273339764 2024-12-08T00:20:43,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/6041a372712a4c8aa781b8b489523d1e 2024-12-08T00:20:43,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2d86c0b631ed4c43b21eb58315e47e80 2024-12-08T00:20:43,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/cde9ede3f95b4779994aee82a725b18f 2024-12-08T00:20:43,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/b05ee7e560bd44879f964bfe89d616e6 2024-12-08T00:20:43,613 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/10b2501ca2aa4830b3a1cc305cf2f8ac 2024-12-08T00:20:43,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2bff0d4efc534bdfb543d376f20a7f9e 2024-12-08T00:20:43,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/0611c4c551fc4bb0bfba825b872a06d8 2024-12-08T00:20:43,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/89c0eca53c884a19ae46238738f8b435 2024-12-08T00:20:43,617 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2b67613b3b1a44e9b7c64d0cc6bfe3bc 2024-12-08T00:20:43,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/29408b8440094aaebd19bdf27dbc413f 2024-12-08T00:20:43,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/f9afbf351edf47ac84b32b9546069f10 2024-12-08T00:20:43,621 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/2959502627e143928dd258cb37a78605 2024-12-08T00:20:43,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/dfc30ad3f20d4ec1ba5b04f67aa9fe5c 2024-12-08T00:20:43,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/d69e454259df4d69ad1941b75dcd7abc 2024-12-08T00:20:43,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/93c365e172144a0caba9eab60b9f8e29 2024-12-08T00:20:43,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/410ec4085924444d994037da7cda9f6c 2024-12-08T00:20:43,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/170c828848854c6397e57dc741de5463 2024-12-08T00:20:43,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e95cb016f406476d8dcb8902e982067b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/94c955e544874371aecf6c9ed5d905cb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a984c31a308f4dbd992baee78e312211, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e6b927bd5cfa44dd84022122cd292dd4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f0cd7e9f2700429e9881c8888b4a42df, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8571267574bd49c9871e43ea5dee83ab, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/fe1a5caa31e844b6bd0e7437631b1ea6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f81aa1c2c27841dab1cefe4eb6fea0b7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0f2c4a8e3c0b4f65a83d8961c62f9085, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/04af11801bd54e7d97c3fd6a6068ef27, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2d98dc46575f40df835cb93aade2cbb7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9] to archive 2024-12-08T00:20:43,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:20:43,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/1720f9f29927490990f0a4a7121874bb 2024-12-08T00:20:43,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aa96f1918877443b81fd374eb3817e65 2024-12-08T00:20:43,631 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e95cb016f406476d8dcb8902e982067b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e95cb016f406476d8dcb8902e982067b 2024-12-08T00:20:43,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/679caf93e3b2442a95a0675ba823ca87 2024-12-08T00:20:43,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/7650cd9428344cf2a510b77746d2e718 2024-12-08T00:20:43,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/94c955e544874371aecf6c9ed5d905cb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/94c955e544874371aecf6c9ed5d905cb 2024-12-08T00:20:43,635 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/aed3ff48cddd4332aeb73d8a9e8ffe9a 2024-12-08T00:20:43,636 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/61276c8e7e2044e8ae4e02070f1f71e5 2024-12-08T00:20:43,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a984c31a308f4dbd992baee78e312211 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a984c31a308f4dbd992baee78e312211 2024-12-08T00:20:43,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/12f478a81ecf4e58a681297f341b8574 2024-12-08T00:20:43,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/9c38b74869934b22b722837852235b8e 2024-12-08T00:20:43,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/dda9a3a1baac43fd998a8e110109776c 2024-12-08T00:20:43,640 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e6b927bd5cfa44dd84022122cd292dd4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/e6b927bd5cfa44dd84022122cd292dd4 2024-12-08T00:20:43,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/6943eeba406c43b197f2fb7a6ababa83 2024-12-08T00:20:43,642 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/172dee4b338646a2a7a78080b7505113 2024-12-08T00:20:43,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f0cd7e9f2700429e9881c8888b4a42df to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f0cd7e9f2700429e9881c8888b4a42df 2024-12-08T00:20:43,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a2bef277e38d465297fc842195e70371 2024-12-08T00:20:43,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/03d2f21f914444f1820f41c9b99f0d07 2024-12-08T00:20:43,647 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8571267574bd49c9871e43ea5dee83ab to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8571267574bd49c9871e43ea5dee83ab 2024-12-08T00:20:43,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/a36ce5d4ef3b4778af24c02a6d60c0cc 2024-12-08T00:20:43,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/50154135ce3b49418889ca8ec66fe813 2024-12-08T00:20:43,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/fe1a5caa31e844b6bd0e7437631b1ea6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/fe1a5caa31e844b6bd0e7437631b1ea6 2024-12-08T00:20:43,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/96a69ca2169642e88a2554f78de9d0f0 2024-12-08T00:20:43,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0380c3584cd948cc8bb435906fe933eb 2024-12-08T00:20:43,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f81aa1c2c27841dab1cefe4eb6fea0b7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/f81aa1c2c27841dab1cefe4eb6fea0b7 2024-12-08T00:20:43,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/8449e52d37fb4c028d3caf1966268c62 2024-12-08T00:20:43,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3755985a1eec437cb79c29982a661f3b 2024-12-08T00:20:43,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0f2c4a8e3c0b4f65a83d8961c62f9085 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/0f2c4a8e3c0b4f65a83d8961c62f9085 2024-12-08T00:20:43,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2a330898349242c79cd710f3a06988de 2024-12-08T00:20:43,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/3d92a1487b2b4368803e70ebd5b6bfa4 2024-12-08T00:20:43,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/04af11801bd54e7d97c3fd6a6068ef27 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/04af11801bd54e7d97c3fd6a6068ef27 2024-12-08T00:20:43,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/4779ee347ae74001bc25982c35dffa4e 2024-12-08T00:20:43,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/faa2665c3e14468e917b1fe19b33f4bc 2024-12-08T00:20:43,663 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/91ce2d5592844bf8953fcc34c1dc5a40 2024-12-08T00:20:43,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2d98dc46575f40df835cb93aade2cbb7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2d98dc46575f40df835cb93aade2cbb7 2024-12-08T00:20:43,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/969c690033244d7c91225893a404728f 2024-12-08T00:20:43,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/effe634907e947c3a0e87fae0c9a2ff1 2024-12-08T00:20:43,667 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/c9ae9d9f8bad4f0284a8f0d12e0c5aa9 2024-12-08T00:20:43,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/db488a4262bc4def84e962940fc3a512, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b4732d9c4c104f5bae33f6fb35b784b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c088ddf17e7c484b954d055c1f95a81d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/47e48423f633482c909890122e9c90fc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4eba1c0f04424bad927e89195a2b7365, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b7f258f78ddd42c491582fcc69c79183, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5b969c71c875494e99f1c679b3d7ba13, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d951ca2b6d194ca084b8ae3aab876bdf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5cfdd9a330814b7dac308249ab90bc15, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d8a52d4b00d74768abccc60fc271a49d, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685] to archive 2024-12-08T00:20:43,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:20:43,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b0b761eadb0848c281ecf071b26cc8be 2024-12-08T00:20:43,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/10425cae7b3641b982e91cea8bde1dad 2024-12-08T00:20:43,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/db488a4262bc4def84e962940fc3a512 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/db488a4262bc4def84e962940fc3a512 2024-12-08T00:20:43,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9e87a88e1a7b4cd8bdf83b1ff1171345 2024-12-08T00:20:43,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/84b92731a1df4e2bb5399c3698cd3d3d 2024-12-08T00:20:43,676 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b4732d9c4c104f5bae33f6fb35b784b6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b4732d9c4c104f5bae33f6fb35b784b6 2024-12-08T00:20:43,677 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9dfad7e4b1424f88a9c18038f1a7024a 2024-12-08T00:20:43,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e4f2f12b39b4b98862a28037eee2ecb 2024-12-08T00:20:43,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c088ddf17e7c484b954d055c1f95a81d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c088ddf17e7c484b954d055c1f95a81d 2024-12-08T00:20:43,680 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/f7f3e9725fc74b6181be145f7a76e435 2024-12-08T00:20:43,681 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/219cd8825e9e41fcb0ce46bb6ece5b5d 2024-12-08T00:20:43,682 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/82e6e9ba78c540b88777fb9905536b13 2024-12-08T00:20:43,683 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/47e48423f633482c909890122e9c90fc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/47e48423f633482c909890122e9c90fc 2024-12-08T00:20:43,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/76fdac4988a6483eb1bee14e53749a5f 2024-12-08T00:20:43,684 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5c852641ea9d418e8adae1bcee6a10d2 2024-12-08T00:20:43,685 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4eba1c0f04424bad927e89195a2b7365 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4eba1c0f04424bad927e89195a2b7365 2024-12-08T00:20:43,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/a6c599a239a54212936d35ca192f3439 2024-12-08T00:20:43,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2b19fdc90b8246578fb9a406e39eaefc 2024-12-08T00:20:43,688 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b7f258f78ddd42c491582fcc69c79183 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/b7f258f78ddd42c491582fcc69c79183 2024-12-08T00:20:43,689 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/816ea7d000104ac885a8f10c796bf2f4 2024-12-08T00:20:43,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/9a84cebd03ae4725b8547aaccec403d5 2024-12-08T00:20:43,690 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5b969c71c875494e99f1c679b3d7ba13 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5b969c71c875494e99f1c679b3d7ba13 2024-12-08T00:20:43,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d76fecdc497a4f5ab38bb418895a7f65 2024-12-08T00:20:43,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/67ae085ea1044a3b97ef3e3ce39a35fd 2024-12-08T00:20:43,693 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/bc0ef8c104114203bb24f3708a0954b4 2024-12-08T00:20:43,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d951ca2b6d194ca084b8ae3aab876bdf to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d951ca2b6d194ca084b8ae3aab876bdf 2024-12-08T00:20:43,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/01eb76e42bc64b018eab5ab32e596916 2024-12-08T00:20:43,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4e9a755de86c43c394ea814f0ab54575 2024-12-08T00:20:43,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/31c8585a849b447389da080e25484cdc 2024-12-08T00:20:43,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5cfdd9a330814b7dac308249ab90bc15 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5cfdd9a330814b7dac308249ab90bc15 2024-12-08T00:20:43,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/c17bd397a2f54bb5ac68b5a364e8ad05 2024-12-08T00:20:43,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/14693b018df74543a51c198a51ddfedd 2024-12-08T00:20:43,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/2d1098b61f0644caa3a7b8391d900f63 2024-12-08T00:20:43,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d8a52d4b00d74768abccc60fc271a49d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/d8a52d4b00d74768abccc60fc271a49d 2024-12-08T00:20:43,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/4d95852daab841c8b96bb2af6df904e8 2024-12-08T00:20:43,704 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/43f1d6f0c2c9440f8f6a7ec82bf0d716 2024-12-08T00:20:43,705 DEBUG [StoreCloser-TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/6bcb4fed04744125828520d533cb3685 2024-12-08T00:20:43,710 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits/547.seqid, newMaxSeqId=547, maxSeqId=4 2024-12-08T00:20:43,710 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977. 2024-12-08T00:20:43,711 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for fabe935a14e4a2f5a6e3e15c47ba0977: 2024-12-08T00:20:43,712 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,712 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=fabe935a14e4a2f5a6e3e15c47ba0977, regionState=CLOSED 2024-12-08T00:20:43,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-08T00:20:43,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure fabe935a14e4a2f5a6e3e15c47ba0977, server=017dd09fb407,36703,1733617179335 in 1.9380 sec 2024-12-08T00:20:43,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-08T00:20:43,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fabe935a14e4a2f5a6e3e15c47ba0977, UNASSIGN in 1.9410 sec 2024-12-08T00:20:43,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-08T00:20:43,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9440 sec 2024-12-08T00:20:43,718 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617243718"}]},"ts":"1733617243718"} 2024-12-08T00:20:43,719 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:20:43,721 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:20:43,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9590 sec 2024-12-08T00:20:43,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T00:20:43,871 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-08T00:20:43,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 
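The entries above record the DISABLE of default:TestAcidGuarantees completing as procId 63, immediately followed by the client request that becomes the DeleteTableProcedure (procId 67) below. Purely as an illustrative sketch of the client side of that exchange, and not code taken from this test, the equivalent HBase Admin calls might look like the following (the class name DropTestTable is invented for the example; a standard HBase 2.x client on the classpath is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            // A table must be disabled before it can be deleted; each call blocks
            // until the corresponding master procedure (as logged above) completes.
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);
            }
            admin.deleteTable(table);
          }
        }
      }
    }

Each Admin call submits a procedure to the master and waits for it to finish, which is why the client-side lines report "Operation: DISABLE ... procId: 63 completed" above and "Operation: DELETE ... procId: 67 completed" further down.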
delete TestAcidGuarantees 2024-12-08T00:20:43,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,873 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-08T00:20:43,873 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,875 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,877 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits] 2024-12-08T00:20:43,880 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/36f27158ad2b4ccaa274133c699422f9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/36f27158ad2b4ccaa274133c699422f9 2024-12-08T00:20:43,881 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/48edf2dd29514d05bc2c0cb26b23d2b8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/A/48edf2dd29514d05bc2c0cb26b23d2b8 2024-12-08T00:20:43,883 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2428b81a94cb4c8581fce4951fc97507 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/2428b81a94cb4c8581fce4951fc97507 2024-12-08T00:20:43,884 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/b0e74ee573f443a3a389acbba7d0783e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/B/b0e74ee573f443a3a389acbba7d0783e 2024-12-08T00:20:43,886 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e92a483893b43f1a94c77a71044f8c4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/5e92a483893b43f1a94c77a71044f8c4 2024-12-08T00:20:43,887 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/de4215a483994ef5b9914a80e6663ed2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/C/de4215a483994ef5b9914a80e6663ed2 2024-12-08T00:20:43,890 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits/547.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977/recovered.edits/547.seqid 2024-12-08T00:20:43,890 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,890 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:20:43,891 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:20:43,891 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T00:20:43,895 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080073f921b3cc467ebc74e7138d2e7ec5_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080073f921b3cc467ebc74e7138d2e7ec5_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,897 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803f5af8b9c1941b78bf98a11273b1afb_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803f5af8b9c1941b78bf98a11273b1afb_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,898 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080a5e5dbde5c14165b85f1268a0d66998_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412080a5e5dbde5c14165b85f1268a0d66998_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,899 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120818b564c2abe9403194f66af28ce57a4e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120818b564c2abe9403194f66af28ce57a4e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,900 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082d94610f22114bb1b3301cbef909771e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082d94610f22114bb1b3301cbef909771e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,901 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082e900a9d054342f1a07d64b1d2e6a6d0_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082e900a9d054342f1a07d64b1d2e6a6d0_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,902 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208308d9cca410c405ba4d8699ab6032263_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208308d9cca410c405ba4d8699ab6032263_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,903 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120831a753c7d4ee4b08ad553b42a5f095cb_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120831a753c7d4ee4b08ad553b42a5f095cb_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,904 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083304ce6e98cf4eb596add34867f3f1b7_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083304ce6e98cf4eb596add34867f3f1b7_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,905 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835a6272fc90d44e3b133a6c0b5c491c6_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835a6272fc90d44e3b133a6c0b5c491c6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,906 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836a0c280a92044aab05efb7316c191b3_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836a0c280a92044aab05efb7316c191b3_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,907 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084238f060e4c84666adee277c08c0977b_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084238f060e4c84666adee277c08c0977b_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,909 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120844ef17b570f246b284e81b62bcb2d8e6_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120844ef17b570f246b284e81b62bcb2d8e6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085b7340e4b6734a9da9c5831aecc4158e_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085b7340e4b6734a9da9c5831aecc4158e_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,911 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208667814061afa4714b0a2e4813bcb9172_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208667814061afa4714b0a2e4813bcb9172_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,912 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087051836a414b4eb382f95f27559dc0ca_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087051836a414b4eb382f95f27559dc0ca_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,913 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087d3aa407c937493e981ef621abd187c3_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087d3aa407c937493e981ef621abd187c3_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,915 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ebe32e9aed2472d9bb65660370cc3e9_fabe935a14e4a2f5a6e3e15c47ba0977 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ebe32e9aed2472d9bb65660370cc3e9_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,916 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087fbbecbe862d41f78aa73a8eef3cacfe_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087fbbecbe862d41f78aa73a8eef3cacfe_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,917 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120881f429573bf3495589fa56e93a451230_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120881f429573bf3495589fa56e93a451230_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,918 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088569cc7366ca41119564072ce3cf87b6_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088569cc7366ca41119564072ce3cf87b6_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,919 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089b1233955b0c47c582e9c8126e0a6a95_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089b1233955b0c47c582e9c8126e0a6a95_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,920 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a5f71944a47b4242bc82973bb5109783_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a5f71944a47b4242bc82973bb5109783_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,921 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ba4bd69d9bb448f1b1103b22c5e053e7_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ba4bd69d9bb448f1b1103b22c5e053e7_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,922 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c1359021a510405ebe113af6bf47221d_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c1359021a510405ebe113af6bf47221d_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,923 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c756be75a4404f5a8a41bb012af5ba3a_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c756be75a4404f5a8a41bb012af5ba3a_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,924 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cab995beabad40b3956189754515038a_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cab995beabad40b3956189754515038a_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,925 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208fce29592d5d944b0844d950319d8de4c_fabe935a14e4a2f5a6e3e15c47ba0977 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208fce29592d5d944b0844d950319d8de4c_fabe935a14e4a2f5a6e3e15c47ba0977 2024-12-08T00:20:43,925 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:20:43,927 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,929 WARN 
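Every HFileArchiver entry above follows the same layout-preserving pattern: a store file or MOB file under .../data/default/TestAcidGuarantees/... is moved to the identical relative path under .../archive/data/default/TestAcidGuarantees/..., and once a directory has been emptied it is deleted. As an illustration of that pattern only, not a reproduction of HBase's internal HFileArchiver, a sketch against the public Hadoop FileSystem API could look like this (ArchiveSketch, archiveStoreFiles and relativeStoreDir are invented names for the example):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
      /** Mirror one store directory into the archive, keeping the same relative layout. */
      public static void archiveStoreFiles(Configuration conf, Path rootDir, String relativeStoreDir)
          throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path src = new Path(new Path(rootDir, "data"), relativeStoreDir);          // e.g. default/TestAcidGuarantees/<region>/C
        Path dst = new Path(new Path(rootDir, "archive/data"), relativeStoreDir);  // parallel path under archive/
        fs.mkdirs(dst);
        for (FileStatus file : fs.listStatus(src)) {
          // On HDFS a rename within the same filesystem is a namenode metadata operation,
          // which is what makes "archiving" a large store file cheap.
          fs.rename(file.getPath(), new Path(dst, file.getPath().getName()));
        }
        fs.delete(src, false); // remove the now-empty source directory, mirroring the "Deleted ..." lines above
      }
    }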
[PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:20:43,932 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T00:20:43,933 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,933 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:20:43,933 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617243933"}]},"ts":"9223372036854775807"} 2024-12-08T00:20:43,934 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:20:43,934 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fabe935a14e4a2f5a6e3e15c47ba0977, NAME => 'TestAcidGuarantees,,1733617214303.fabe935a14e4a2f5a6e3e15c47ba0977.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:20:43,935 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-08T00:20:43,935 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617243935"}]},"ts":"9223372036854775807"} 2024-12-08T00:20:43,936 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:20:43,938 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-12-08T00:20:43,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-08T00:20:43,974 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-08T00:20:43,985 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241 (was 238) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-72039625_22 at /127.0.0.1:50760 [Waiting for operation 
#766] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-72039625_22 at /127.0.0.1:50766 [Waiting for operation #754] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1079539836_22 at /127.0.0.1:47454 [Waiting for operation #868] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1079539836_22 at /127.0.0.1:34444 [Waiting for operation #947] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4da31e77-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=474 (was 392) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7693 (was 7836) 2024-12-08T00:20:43,995 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=474, ProcessCount=11, AvailableMemoryMB=7693 2024-12-08T00:20:43,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T00:20:43,997 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:20:43,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:20:43,998 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:20:43,999 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:43,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for 
creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-08T00:20:43,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:43,999 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:20:44,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742139_1315 (size=963) 2024-12-08T00:20:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:44,407 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:20:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742140_1316 (size=53) 2024-12-08T00:20:44,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d6019a516c33d3d08395be7add424e27, disabling compactions & flushes 2024-12-08T00:20:44,813 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
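For context, the CREATE logged above can be reproduced with the standard HBase 2.x client API. The sketch below is illustrative only (it is not the test harness's own code); the builder and admin calls are the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder / Admin API, and the deliberately tiny MEMSTORE_FLUSHSIZE is an assumption mirroring the TableDescriptorChecker warning printed above.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata selecting the adaptive in-memory compaction policy
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              // assumed tiny flush size, matching the "MEMSTORE_FLUSHSIZE ... is too small" warning
              .setMemStoreFlushSize(131072L);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)    // VERSIONS => '1'
                .setBlocksize(65536)  // BLOCKSIZE => '65536'
                .build());
          }
          admin.createTable(table.build());
        }
      }
    }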
2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. after waiting 0 ms 2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:44,813 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:44,813 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:44,814 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:20:44,815 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617244814"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617244814"}]},"ts":"1733617244814"} 2024-12-08T00:20:44,816 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:20:44,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:20:44,816 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617244816"}]},"ts":"1733617244816"} 2024-12-08T00:20:44,817 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:20:44,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, ASSIGN}] 2024-12-08T00:20:44,822 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, ASSIGN 2024-12-08T00:20:44,822 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:20:44,973 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=d6019a516c33d3d08395be7add424e27, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:44,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:20:45,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:45,126 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:45,128 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:45,129 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:20:45,129 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,129 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:20:45,129 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,129 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,130 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,131 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:45,132 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6019a516c33d3d08395be7add424e27 columnFamilyName A 2024-12-08T00:20:45,132 DEBUG [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:45,132 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(327): Store=d6019a516c33d3d08395be7add424e27/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:45,132 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,133 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:45,133 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6019a516c33d3d08395be7add424e27 columnFamilyName B 2024-12-08T00:20:45,133 DEBUG [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:45,134 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(327): Store=d6019a516c33d3d08395be7add424e27/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:45,134 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,134 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:20:45,135 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6019a516c33d3d08395be7add424e27 columnFamilyName C 2024-12-08T00:20:45,135 DEBUG [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:20:45,135 INFO [StoreOpener-d6019a516c33d3d08395be7add424e27-1 {}] regionserver.HStore(327): Store=d6019a516c33d3d08395be7add424e27/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:20:45,135 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:45,136 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,136 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,137 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
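The compaction parameters printed for each store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms) correspond to standard HBase configuration keys. A minimal sketch of where such values would be set, assuming the stock property names rather than settings taken from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // "minFilesToCompact:3"
        conf.setInt("hbase.hstore.compaction.max", 10);                // "maxFilesToCompact:10"
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // "ratio 1.200000"
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // "off-peak ratio 5.000000"
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // "major period 604800000" (7 days)
        return conf;
      }
    }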
2024-12-08T00:20:45,139 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:45,141 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:20:45,141 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened d6019a516c33d3d08395be7add424e27; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74501823, jitterRate=0.11016367375850677}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:20:45,142 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:45,143 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., pid=70, masterSystemTime=1733617245126 2024-12-08T00:20:45,144 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:45,144 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
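Once the region is open, the workload threads issue single-row mutations spanning all three families; cell keys such as test_row_0/A:col10 in the flush output further below come from that load. A minimal sketch of such a write using the public client API (illustrative only, not the harness code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWriteSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One Put covering families A, B and C; HBase applies it atomically per row,
          // which is the property the getAtomicity scenario verifies.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("value");
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put);
        }
      }
    }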
2024-12-08T00:20:45,144 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=d6019a516c33d3d08395be7add424e27, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:20:45,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-08T00:20:45,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 in 171 msec 2024-12-08T00:20:45,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-08T00:20:45,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, ASSIGN in 325 msec 2024-12-08T00:20:45,148 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:20:45,148 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617245148"}]},"ts":"1733617245148"} 2024-12-08T00:20:45,149 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:20:45,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:20:45,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-12-08T00:20:46,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T00:20:46,103 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-08T00:20:46,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-12-08T00:20:46,108 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,110 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,111 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,112 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:20:46,113 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55540, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:20:46,115 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-12-08T00:20:46,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,119 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-12-08T00:20:46,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-08T00:20:46,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-12-08T00:20:46,129 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,130 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-12-08T00:20:46,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,133 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-12-08T00:20:46,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,137 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-08T00:20:46,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,140 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-08T00:20:46,143 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,144 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-12-08T00:20:46,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,147 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-12-08T00:20:46,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:20:46,152 DEBUG [hconnection-0x21d37bff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,153 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,156 DEBUG [hconnection-0x619fd260-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,157 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:46,159 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-08T00:20:46,160 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:46,160 DEBUG [hconnection-0xeb821cf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T00:20:46,161 DEBUG [hconnection-0x7ecd06dc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,161 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:46,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:46,161 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,163 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,169 DEBUG [hconnection-0x694711a4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,170 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:46,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:46,175 DEBUG [hconnection-0x6aa80aec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,176 DEBUG [hconnection-0x3724815c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,177 DEBUG [hconnection-0x4812d87f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,177 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,178 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:46,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,188 DEBUG [hconnection-0x16fe860a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,189 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,192 DEBUG [hconnection-0x2b6a1553-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:20:46,193 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:20:46,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/594419844177417ebb9c42cf6019af4a is 50, key is test_row_0/A:col10/1733617246174/Put/seqid=0 2024-12-08T00:20:46,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742141_1317 (size=14341) 2024-12-08T00:20:46,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/594419844177417ebb9c42cf6019af4a 2024-12-08T00:20:46,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/dd70acd3e07245a5980486f9e7b4f9b2 is 50, key is test_row_0/B:col10/1733617246174/Put/seqid=0 2024-12-08T00:20:46,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T00:20:46,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742142_1318 (size=12001) 2024-12-08T00:20:46,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/dd70acd3e07245a5980486f9e7b4f9b2 2024-12-08T00:20:46,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/df06d91414074b13aba91a24a0bc123f is 50, key is test_row_0/C:col10/1733617246174/Put/seqid=0 2024-12-08T00:20:46,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T00:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:46,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:46,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742143_1319 (size=12001) 2024-12-08T00:20:46,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/df06d91414074b13aba91a24a0bc123f 2024-12-08T00:20:46,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/594419844177417ebb9c42cf6019af4a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a 2024-12-08T00:20:46,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a, entries=200, sequenceid=13, filesize=14.0 K 2024-12-08T00:20:46,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/dd70acd3e07245a5980486f9e7b4f9b2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2 2024-12-08T00:20:46,360 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2, entries=150, sequenceid=13, filesize=11.7 K 2024-12-08T00:20:46,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/df06d91414074b13aba91a24a0bc123f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f 2024-12-08T00:20:46,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f, entries=150, sequenceid=13, filesize=11.7 K 2024-12-08T00:20:46,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d6019a516c33d3d08395be7add424e27 in 193ms, sequenceid=13, compaction requested=false 2024-12-08T00:20:46,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T00:20:46,467 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T00:20:46,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:46,468 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:46,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:46,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:46,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:46,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/46979a23024f4c18bd751aed22be1f0b is 50, key is test_row_0/A:col10/1733617246218/Put/seqid=0 2024-12-08T00:20:46,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742144_1320 (size=12001) 2024-12-08T00:20:46,526 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/46979a23024f4c18bd751aed22be1f0b 2024-12-08T00:20:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:46,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:46,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/192453ed55f147d3a663fd9bd6a9bdd9 is 50, key is test_row_0/B:col10/1733617246218/Put/seqid=0 2024-12-08T00:20:46,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742145_1321 (size=12001) 2024-12-08T00:20:46,559 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/192453ed55f147d3a663fd9bd6a9bdd9 2024-12-08T00:20:46,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/692471317dd1457e9b9f3c832b8b5de9 is 50, key is test_row_0/C:col10/1733617246218/Put/seqid=0 2024-12-08T00:20:46,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742146_1322 (size=12001) 2024-12-08T00:20:46,592 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/692471317dd1457e9b9f3c832b8b5de9 2024-12-08T00:20:46,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/46979a23024f4c18bd751aed22be1f0b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b 2024-12-08T00:20:46,605 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b, entries=150, sequenceid=37, filesize=11.7 K 2024-12-08T00:20:46,608 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/192453ed55f147d3a663fd9bd6a9bdd9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9 2024-12-08T00:20:46,615 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9, entries=150, sequenceid=37, filesize=11.7 K 2024-12-08T00:20:46,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/692471317dd1457e9b9f3c832b8b5de9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9 2024-12-08T00:20:46,633 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9, entries=150, sequenceid=37, filesize=11.7 K 2024-12-08T00:20:46,634 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d6019a516c33d3d08395be7add424e27 in 166ms, sequenceid=37, compaction requested=false 2024-12-08T00:20:46,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:46,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:46,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-08T00:20:46,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-08T00:20:46,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-08T00:20:46,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 474 msec 2024-12-08T00:20:46,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 480 msec 2024-12-08T00:20:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:46,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:46,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:46,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d84710f8762946ed9cd1016be69aba06 is 50, key is test_row_0/A:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:46,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742147_1323 (size=12001) 2024-12-08T00:20:46,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d84710f8762946ed9cd1016be69aba06 2024-12-08T00:20:46,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/83394c744268448ebda34fd64ef04f7d is 50, key is test_row_0/B:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:46,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742148_1324 (size=12001) 2024-12-08T00:20:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T00:20:46,763 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-08T00:20:46,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-08T00:20:46,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T00:20:46,766 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:46,767 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:46,767 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:46,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T00:20:46,918 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T00:20:46,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:46,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:46,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:46,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
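The RegionTooBusyException entries above are the region server refusing Mutate calls once the region's memstore passes its blocking limit (512.0 K here), while the flush procedure (pid=74) fails with "Unable to complete flush" because a flush is already running. The stock HBase client retries RegionTooBusyException on its own; the sketch below is only an illustrative manual retry loop with backoff, and the table, row and family names are copied from the log rather than from any test source shown here.

    // Illustrative only: a manual retry loop for RegionTooBusyException.
    // The standard HBase client already retries this exception internally;
    // table, row and family names are taken from the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                     // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);   // give the memstore flush time to finish
              backoffMs *= 2;            // exponential backoff before the next attempt
            }
          }
        }
      }
    }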
2024-12-08T00:20:46,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:46,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617306973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617306974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617306974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617306975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:46,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617306986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T00:20:47,072 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T00:20:47,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:47,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:47,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:47,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
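The flush that keeps failing here was requested through the master: the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed" line is the client side of an Admin flush, and the master keeps re-dispatching pid=74 to the region server until the region is no longer "already flushing". A minimal sketch of issuing that kind of flush from client code, assuming only the table name seen in the log:

    // Minimal sketch: ask the master to flush a table, as the test client does.
    // Only the table name comes from the log; the rest is boilerplate.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master (pid=73/74 in the log)
          // and waits for it to finish before returning.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }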
2024-12-08T00:20:47,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:47,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/83394c744268448ebda34fd64ef04f7d 2024-12-08T00:20:47,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 is 50, key is test_row_0/C:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:47,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742149_1325 (size=12001) 2024-12-08T00:20:47,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 2024-12-08T00:20:47,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d84710f8762946ed9cd1016be69aba06 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06 2024-12-08T00:20:47,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06, entries=150, sequenceid=50, filesize=11.7 K 2024-12-08T00:20:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/83394c744268448ebda34fd64ef04f7d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d 2024-12-08T00:20:47,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d, entries=150, sequenceid=50, filesize=11.7 K 2024-12-08T00:20:47,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 
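The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The test's actual settings are not visible in this log; the values below are assumptions chosen only so the arithmetic lands on 512 K.

    // Assumed values for illustration: 128 K flush size x 4 multiplier = 512 K blocking limit.
    // The real TestAcidGuarantees configuration is not shown in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);  // flush at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288 = 512 K
      }
    }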
2024-12-08T00:20:47,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69, entries=150, sequenceid=50, filesize=11.7 K 2024-12-08T00:20:47,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d6019a516c33d3d08395be7add424e27 in 523ms, sequenceid=50, compaction requested=true 2024-12-08T00:20:47,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:47,171 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:47,172 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:47,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:47,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:47,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:47,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:47,174 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:47,174 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:47,174 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
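The "3 store files, 0 compacting, 3 eligible, 16 blocking" line reflects two store-level settings: the minimum file count that makes a store eligible for minor compaction and the file count at which further flushes are delayed. The keys below are the standard ones; the defaults shown (3 and 16) match the counts the log reports, though the test may override them.

    // Standard store compaction knobs; defaults of 3 and 16 match the counts in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholdExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A store becomes eligible for minor compaction once it has this many HFiles.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Flushes are delayed once a store accumulates this many HFiles ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(conf.getInt("hbase.hstore.compactionThreshold", 3) + " / "
            + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
      }
    }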
2024-12-08T00:20:47,174 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=37.4 K 2024-12-08T00:20:47,175 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 594419844177417ebb9c42cf6019af4a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733617246167 2024-12-08T00:20:47,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:47,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:47,175 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:47,175 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.2 K 2024-12-08T00:20:47,176 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46979a23024f4c18bd751aed22be1f0b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733617246213 2024-12-08T00:20:47,176 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dd70acd3e07245a5980486f9e7b4f9b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733617246167 2024-12-08T00:20:47,176 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d84710f8762946ed9cd1016be69aba06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:47,176 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 192453ed55f147d3a663fd9bd6a9bdd9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733617246213 2024-12-08T00:20:47,177 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 83394c744268448ebda34fd64ef04f7d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:47,190 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:47,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/23cbb82d84e949458a1fb789c4c1bd1c is 50, key is test_row_0/B:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:47,210 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:47,211 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/163ed51cb8ae42efab436b216648e95a is 50, key is test_row_0/A:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:47,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T00:20:47,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:47,226 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742151_1327 (size=12104) 2024-12-08T00:20:47,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/210e34102b734265a52a4a799ac40ab2 is 50, key is test_row_0/A:col10/1733617246665/Put/seqid=0 2024-12-08T00:20:47,258 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/163ed51cb8ae42efab436b216648e95a as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/163ed51cb8ae42efab436b216648e95a 2024-12-08T00:20:47,264 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 163ed51cb8ae42efab436b216648e95a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:47,264 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:47,264 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617247171; duration=0sec 2024-12-08T00:20:47,265 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:47,265 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:47,265 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:47,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742150_1326 (size=12104) 2024-12-08T00:20:47,269 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:47,269 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:47,269 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
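At this point the short-compactions thread has committed the compacted A file and moved on to selecting the C store. These compactions were queued automatically by MemStoreFlusher.0, but the same minor compactions can be requested and observed from client code; a small sketch, assuming only the table name from the log:

    // Sketch: request a compaction and poll its state; the table name comes from the log.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                   // queue a minor compaction request (asynchronous)
          CompactionState state = admin.getCompactionState(table);
          while (state != CompactionState.NONE) { // poll until no compaction is reported as running;
            Thread.sleep(1000);                   // the first poll may already be NONE if it has not started yet
            state = admin.getCompactionState(table);
          }
        }
      }
    }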
2024-12-08T00:20:47,269 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.2 K 2024-12-08T00:20:47,270 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting df06d91414074b13aba91a24a0bc123f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733617246167 2024-12-08T00:20:47,270 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 692471317dd1457e9b9f3c832b8b5de9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733617246213 2024-12-08T00:20:47,270 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7f497f9bf9f4ae6a2674c0f8e2bfb69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:47,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:47,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:47,282 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:47,283 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/e5ead05e71a743d18504b19a0b3e90be is 50, key is test_row_0/C:col10/1733617246647/Put/seqid=0 2024-12-08T00:20:47,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742152_1328 (size=12001) 2024-12-08T00:20:47,285 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/210e34102b734265a52a4a799ac40ab2 2024-12-08T00:20:47,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/19be09838e5e4ed6b1cece58365c753f is 50, key is test_row_0/B:col10/1733617246665/Put/seqid=0 2024-12-08T00:20:47,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617307291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617307294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617307294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617307294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617307295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742153_1329 (size=12104) 2024-12-08T00:20:47,314 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/e5ead05e71a743d18504b19a0b3e90be as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e5ead05e71a743d18504b19a0b3e90be 2024-12-08T00:20:47,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742154_1330 (size=12001) 2024-12-08T00:20:47,320 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/19be09838e5e4ed6b1cece58365c753f 2024-12-08T00:20:47,320 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into e5ead05e71a743d18504b19a0b3e90be(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:47,320 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:47,320 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617247173; duration=0sec 2024-12-08T00:20:47,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:47,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:47,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/49fee7a8e6574650b3abae8df284b8fa is 50, key is test_row_0/C:col10/1733617246665/Put/seqid=0 2024-12-08T00:20:47,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742155_1331 (size=12001) 2024-12-08T00:20:47,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T00:20:47,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617307399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617307399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617307399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617307400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617307601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617307602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617307602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617307604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,674 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/23cbb82d84e949458a1fb789c4c1bd1c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/23cbb82d84e949458a1fb789c4c1bd1c 2024-12-08T00:20:47,679 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 23cbb82d84e949458a1fb789c4c1bd1c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:47,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27:
2024-12-08T00:20:47,679 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617247172; duration=0sec
2024-12-08T00:20:47,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:20:47,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B
2024-12-08T00:20:47,732 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/49fee7a8e6574650b3abae8df284b8fa
2024-12-08T00:20:47,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/210e34102b734265a52a4a799ac40ab2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2
2024-12-08T00:20:47,741 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2, entries=150, sequenceid=74, filesize=11.7 K
2024-12-08T00:20:47,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/19be09838e5e4ed6b1cece58365c753f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f
2024-12-08T00:20:47,746 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f, entries=150, sequenceid=74, filesize=11.7 K
2024-12-08T00:20:47,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/49fee7a8e6574650b3abae8df284b8fa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa
2024-12-08T00:20:47,750 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa, entries=150, sequenceid=74, filesize=11.7 K
2024-12-08T00:20:47,751 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d6019a516c33d3d08395be7add424e27 in 525ms, sequenceid=74, compaction requested=false
2024-12-08T00:20:47,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27:
2024-12-08T00:20:47,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.
2024-12-08T00:20:47,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74
2024-12-08T00:20:47,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=74
2024-12-08T00:20:47,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73
2024-12-08T00:20:47,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 986 msec
2024-12-08T00:20:47,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 991 msec
2024-12-08T00:20:47,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27
2024-12-08T00:20:47,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C
2024-12-08T00:20:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:20:47,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/16c62cbedbe94962b016de25b5b75f16 is 50, key is test_row_0/A:col10/1733617247294/Put/seqid=0
2024-12-08T00:20:47,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742156_1332 (size=14341)
2024-12-08T00:20:47,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:47,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617307866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:47,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-08T00:20:47,870 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed
2024-12-08T00:20:47,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T00:20:47,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees
2024-12-08T00:20:47,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-12-08T00:20:47,874 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:20:47,874 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:20:47,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T00:20:47,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:20:47,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617307905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335
2024-12-08T00:20:47,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617307906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617307907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617307907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:47,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617307968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:47,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T00:20:48,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T00:20:48,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:48,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:48,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T00:20:48,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T00:20:48,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:48,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617308181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:20:48,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/16c62cbedbe94962b016de25b5b75f16
2024-12-08T00:20:48,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/675ff23d4b0e49ec979bbc044921b4a1 is 50, key is test_row_0/B:col10/1733617247294/Put/seqid=0
2024-12-08T00:20:48,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742157_1333 (size=12001)
2024-12-08T00:20:48,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:20:48,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76
2024-12-08T00:20:48,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.
2024-12-08T00:20:48,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing
2024-12-08T00:20:48,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.
2024-12-08T00:20:48,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:20:48,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:20:48,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617308409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617308409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617308410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617308411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617308484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T00:20:48,487 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T00:20:48,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:48,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/675ff23d4b0e49ec979bbc044921b4a1 2024-12-08T00:20:48,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T00:20:48,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:48,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:48,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:48,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/2ff1e43af1a640a38b10235a7fe81a15 is 50, key is test_row_0/C:col10/1733617247294/Put/seqid=0 2024-12-08T00:20:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742158_1334 (size=12001) 2024-12-08T00:20:48,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/2ff1e43af1a640a38b10235a7fe81a15 2024-12-08T00:20:48,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/16c62cbedbe94962b016de25b5b75f16 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16 2024-12-08T00:20:48,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16, entries=200, sequenceid=90, filesize=14.0 K 2024-12-08T00:20:48,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/675ff23d4b0e49ec979bbc044921b4a1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1 2024-12-08T00:20:48,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1, entries=150, sequenceid=90, filesize=11.7 K 2024-12-08T00:20:48,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/2ff1e43af1a640a38b10235a7fe81a15 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15 2024-12-08T00:20:48,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15, entries=150, sequenceid=90, filesize=11.7 K 2024-12-08T00:20:48,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d6019a516c33d3d08395be7add424e27 in 862ms, sequenceid=90, compaction requested=true 2024-12-08T00:20:48,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:48,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:48,678 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:48,678 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:48,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:48,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:48,679 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:48,679 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/23cbb82d84e949458a1fb789c4c1bd1c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.3 K 2024-12-08T00:20:48,679 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:48,679 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:48,679 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:48,679 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/163ed51cb8ae42efab436b216648e95a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=37.5 K 2024-12-08T00:20:48,680 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 163ed51cb8ae42efab436b216648e95a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:48,680 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 23cbb82d84e949458a1fb789c4c1bd1c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:48,681 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 19be09838e5e4ed6b1cece58365c753f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733617246662 2024-12-08T00:20:48,681 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 210e34102b734265a52a4a799ac40ab2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733617246662 2024-12-08T00:20:48,682 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 675ff23d4b0e49ec979bbc044921b4a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247290 2024-12-08T00:20:48,682 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16c62cbedbe94962b016de25b5b75f16, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247283 2024-12-08T00:20:48,692 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#275 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:48,693 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ed9ebab0b30b45949d7a02e42febe516 is 50, key is test_row_0/B:col10/1733617247294/Put/seqid=0 2024-12-08T00:20:48,694 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:48,695 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/f7b7cb2bc81640f780b7c3172676e2bc is 50, key is test_row_0/A:col10/1733617247294/Put/seqid=0 2024-12-08T00:20:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742159_1335 (size=12207) 2024-12-08T00:20:48,708 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ed9ebab0b30b45949d7a02e42febe516 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ed9ebab0b30b45949d7a02e42febe516 2024-12-08T00:20:48,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742160_1336 (size=12207) 2024-12-08T00:20:48,715 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into ed9ebab0b30b45949d7a02e42febe516(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:48,715 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:48,715 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617248678; duration=0sec 2024-12-08T00:20:48,715 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:48,715 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:20:48,715 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:48,716 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:48,716 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:48,716 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:48,716 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e5ead05e71a743d18504b19a0b3e90be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.3 K 2024-12-08T00:20:48,717 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e5ead05e71a743d18504b19a0b3e90be, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733617246533 2024-12-08T00:20:48,717 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 49fee7a8e6574650b3abae8df284b8fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733617246662 2024-12-08T00:20:48,718 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff1e43af1a640a38b10235a7fe81a15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247290 2024-12-08T00:20:48,738 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#277 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:48,739 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/0926b32f73cc436ab0c0c04345bb95be is 50, key is test_row_0/C:col10/1733617247294/Put/seqid=0 2024-12-08T00:20:48,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742161_1337 (size=12207) 2024-12-08T00:20:48,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:48,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T00:20:48,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:48,795 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:20:48,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:48,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:48,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:48,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:48,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:48,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:48,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/8c49fbbd6a9c4b65925812afd2913135 is 50, key is test_row_0/A:col10/1733617247854/Put/seqid=0 2024-12-08T00:20:48,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742162_1338 (size=12001) 2024-12-08T00:20:48,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T00:20:48,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:48,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:49,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617309012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,117 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/f7b7cb2bc81640f780b7c3172676e2bc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f7b7cb2bc81640f780b7c3172676e2bc 2024-12-08T00:20:49,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617309116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,123 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into f7b7cb2bc81640f780b7c3172676e2bc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:49,123 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:49,123 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617248677; duration=0sec 2024-12-08T00:20:49,123 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:49,123 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:49,179 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/0926b32f73cc436ab0c0c04345bb95be as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0926b32f73cc436ab0c0c04345bb95be 2024-12-08T00:20:49,185 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 0926b32f73cc436ab0c0c04345bb95be(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:49,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:49,186 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617248678; duration=0sec 2024-12-08T00:20:49,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:49,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:49,209 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/8c49fbbd6a9c4b65925812afd2913135 2024-12-08T00:20:49,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/256df776d68c44339543f8ec41f48d97 is 50, key is test_row_0/B:col10/1733617247854/Put/seqid=0 2024-12-08T00:20:49,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742163_1339 (size=12001) 2024-12-08T00:20:49,222 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/256df776d68c44339543f8ec41f48d97 2024-12-08T00:20:49,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/83843c4e378e4f2db19a999d89204adf is 50, key is test_row_0/C:col10/1733617247854/Put/seqid=0 2024-12-08T00:20:49,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742164_1340 (size=12001) 2024-12-08T00:20:49,254 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:20:49,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617309320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617309415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617309415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617309417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617309418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617309625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:49,638 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/83843c4e378e4f2db19a999d89204adf 2024-12-08T00:20:49,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/8c49fbbd6a9c4b65925812afd2913135 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135 2024-12-08T00:20:49,648 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T00:20:49,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/256df776d68c44339543f8ec41f48d97 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97 2024-12-08T00:20:49,653 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T00:20:49,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/83843c4e378e4f2db19a999d89204adf as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf 2024-12-08T00:20:49,659 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T00:20:49,661 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d6019a516c33d3d08395be7add424e27 in 866ms, sequenceid=114, compaction requested=false 2024-12-08T00:20:49,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:49,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:49,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-08T00:20:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-08T00:20:49,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-08T00:20:49,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7890 sec 2024-12-08T00:20:49,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.7950 sec 2024-12-08T00:20:49,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T00:20:49,987 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-08T00:20:49,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:49,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-08T00:20:49,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:49,991 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:49,991 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:49,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:50,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:50,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:50,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, 
store=B 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:50,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/88d11d6424cc4a4f8c45455a1c5b04a4 is 50, key is test_row_0/A:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:50,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742165_1341 (size=12051) 2024-12-08T00:20:50,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:50,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:50,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617310178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617310280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:50,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:50,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:50,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617310485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/88d11d6424cc4a4f8c45455a1c5b04a4 2024-12-08T00:20:50,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b934f280da104030a149e950c311abe8 is 50, key is test_row_0/B:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:50,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742166_1342 (size=12051) 2024-12-08T00:20:50,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:50,601 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:50,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,756 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617310789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:50,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:50,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:50,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:50,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
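[editor's note] The RegionTooBusyException records above show the region server refusing Mutate calls while the memstore of d6019a516c33d3d08395be7add424e27 is over its blocking limit (logged as "Over memstore limit=512.0 K"). Below is a minimal, illustrative client-side sketch of backing off and retrying a write when that exception surfaces to application code; the table name, family, qualifier, and retry settings are taken from or assumed for this log, and in practice the HBase client normally retries such calls internally before the exception reaches the caller.

// Hypothetical retry sketch for the RegionTooBusyException seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);          // the server rejects this while the region
          break;                   // memstore is above its blocking limit
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs); // back off so the in-flight flush can drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}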
2024-12-08T00:20:50,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:50,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b934f280da104030a149e950c311abe8 2024-12-08T00:20:50,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/0bcabd8cb100402cbd93959420ccb52c is 50, key is test_row_0/C:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:50,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742167_1343 (size=12051) 2024-12-08T00:20:51,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
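[editor's note] The repeating "Executing remote procedure ... FlushRegionCallable, pid=78" / "NOT flushing ... as already flushing" / "Unable to complete flush" cycle above is the master re-dispatching a flush procedure while the region is still busy with a flush triggered by memstore pressure, so each attempt fails and is reported back via reportProcedureDone. A minimal sketch of requesting such a flush from client code is shown below; it is illustrative only and assumes the same table name as in the log, with the master driving a flush procedure comparable to pid=78.

// Illustrative only: asking the master to flush the test table via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master flushes every region of the table through a flush procedure;
      // a region server that is already flushing a region skips the request,
      // which is what produces the retries visible around pid=78 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}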
2024-12-08T00:20:51,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:51,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617311293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/0bcabd8cb100402cbd93959420ccb52c 2024-12-08T00:20:51,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/88d11d6424cc4a4f8c45455a1c5b04a4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4 2024-12-08T00:20:51,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T00:20:51,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b934f280da104030a149e950c311abe8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8 2024-12-08T00:20:51,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8, entries=150, 
sequenceid=130, filesize=11.8 K 2024-12-08T00:20:51,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/0bcabd8cb100402cbd93959420ccb52c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c 2024-12-08T00:20:51,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T00:20:51,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d6019a516c33d3d08395be7add424e27 in 1288ms, sequenceid=130, compaction requested=true 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:51,418 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:51,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:51,418 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:51,419 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:51,419 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:51,419 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in 
TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,419 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f7b7cb2bc81640f780b7c3172676e2bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.4 K 2024-12-08T00:20:51,419 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:51,419 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:51,420 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,420 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7b7cb2bc81640f780b7c3172676e2bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247290 2024-12-08T00:20:51,420 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ed9ebab0b30b45949d7a02e42febe516, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.4 K 2024-12-08T00:20:51,420 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c49fbbd6a9c4b65925812afd2913135, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733617247854 2024-12-08T00:20:51,420 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ed9ebab0b30b45949d7a02e42febe516, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247290 2024-12-08T00:20:51,420 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88d11d6424cc4a4f8c45455a1c5b04a4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:51,420 
DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 256df776d68c44339543f8ec41f48d97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733617247854 2024-12-08T00:20:51,421 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b934f280da104030a149e950c311abe8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:51,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:51,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:51,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:51,427 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:51,427 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/7b3b45f443024ba5b33aac05cf8695c6 is 50, key is test_row_0/B:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:51,428 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#284 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:51,428 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/61a52732452d4f43aedc696c86ad98a2 is 50, key is test_row_0/A:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:51,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742168_1344 (size=12359) 2024-12-08T00:20:51,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a0cdcb07ff444898bde7b91d39610451 is 50, key is test_row_0/A:col10/1733617250174/Put/seqid=0 2024-12-08T00:20:51,444 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/7b3b45f443024ba5b33aac05cf8695c6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/7b3b45f443024ba5b33aac05cf8695c6 2024-12-08T00:20:51,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742169_1345 (size=12359) 2024-12-08T00:20:51,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617311443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617311443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617311444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617311448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742170_1346 (size=14541) 2024-12-08T00:20:51,452 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 7b3b45f443024ba5b33aac05cf8695c6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:51,452 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:51,452 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617251418; duration=0sec 2024-12-08T00:20:51,452 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:51,452 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:20:51,452 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:51,452 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/61a52732452d4f43aedc696c86ad98a2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/61a52732452d4f43aedc696c86ad98a2 2024-12-08T00:20:51,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a0cdcb07ff444898bde7b91d39610451 2024-12-08T00:20:51,454 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:51,454 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:51,454 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,454 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0926b32f73cc436ab0c0c04345bb95be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=35.4 K 2024-12-08T00:20:51,455 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0926b32f73cc436ab0c0c04345bb95be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733617247290 2024-12-08T00:20:51,455 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 83843c4e378e4f2db19a999d89204adf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733617247854 2024-12-08T00:20:51,455 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bcabd8cb100402cbd93959420ccb52c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:51,459 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 61a52732452d4f43aedc696c86ad98a2(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
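[editor's note] The compaction and memstore messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", "Over memstore limit=512.0 K") map onto a small set of region server settings. The sketch below names the relevant configuration keys; the concrete values shown are assumptions close to the shipped defaults, not the much smaller values this test run evidently uses, and the blocking memstore size is simply flush size times block multiplier.

// Illustrative configuration for the limits visible in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushCompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore flush size; the "Over memstore limit" check blocks writes once
    // the region memstore exceeds this value times the block multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minimum/maximum store files the compaction policy will select; the
    // "3 eligible" selections above meet the minimum of 3.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store file count at which writes are blocked ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    long blockingMemstore = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block once the region memstore exceeds "
        + blockingMemstore + " bytes");
  }
}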
2024-12-08T00:20:51,459 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:51,459 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617251418; duration=0sec 2024-12-08T00:20:51,459 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:51,459 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:51,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b1f61414c39d460f9fe0a139abc27a56 is 50, key is test_row_0/B:col10/1733617250174/Put/seqid=0 2024-12-08T00:20:51,470 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:51,471 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/08d0026358a14885a2f7eae04087dda3 is 50, key is test_row_0/C:col10/1733617249009/Put/seqid=0 2024-12-08T00:20:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742171_1347 (size=12151) 2024-12-08T00:20:51,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742172_1348 (size=12359) 2024-12-08T00:20:51,522 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
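[editor's note] At this point the earlier flush has been committed (entries=150, sequenceid=130 added to stores A, B and C) and the A and B stores have each been compacted into a single ~12.1 K file. A minimal read-back sketch follows; the family names A/B/C and qualifier col10 are taken from the HFile keys in this log, and the consistency check reflects the assumption that the test writes all three families of a row in one mutation.

// Minimal read-back sketch for the rows flushed and compacted above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result row : scanner) {
        // Compare the same column across the three families of each row.
        byte[] a = row.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
        byte[] b = row.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
        byte[] c = row.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
        boolean consistent = Bytes.equals(a, b) && Bytes.equals(b, c);
        System.out.println(Bytes.toString(row.getRow())
            + " consistent across A/B/C: " + consistent);
      }
    }
  }
}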
2024-12-08T00:20:51,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617311549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617311549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617311549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617311552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:51,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617311751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617311751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617311752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617311755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,828 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b1f61414c39d460f9fe0a139abc27a56 2024-12-08T00:20:51,884 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/08d0026358a14885a2f7eae04087dda3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/08d0026358a14885a2f7eae04087dda3 2024-12-08T00:20:51,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/b421747bc48d46db94be9d09c8db7b9f is 50, key is test_row_0/C:col10/1733617250174/Put/seqid=0 2024-12-08T00:20:51,889 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 08d0026358a14885a2f7eae04087dda3(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:51,889 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:51,889 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617251418; duration=0sec 2024-12-08T00:20:51,890 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:51,890 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:51,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742173_1349 (size=12151) 2024-12-08T00:20:51,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:51,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:51,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:51,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:51,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:51,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617312054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617312054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617312056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617312059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:52,133 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:52,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:52,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:52,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:52,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:52,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:52,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:52,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:52,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:52,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/b421747bc48d46db94be9d09c8db7b9f 2024-12-08T00:20:52,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a0cdcb07ff444898bde7b91d39610451 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451 2024-12-08T00:20:52,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451, entries=200, sequenceid=153, filesize=14.2 K 2024-12-08T00:20:52,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/b1f61414c39d460f9fe0a139abc27a56 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56 2024-12-08T00:20:52,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617312304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56, entries=150, sequenceid=153, filesize=11.9 K 2024-12-08T00:20:52,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/b421747bc48d46db94be9d09c8db7b9f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f 2024-12-08T00:20:52,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f, entries=150, sequenceid=153, filesize=11.9 K 2024-12-08T00:20:52,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d6019a516c33d3d08395be7add424e27 in 890ms, sequenceid=153, compaction requested=false 2024-12-08T00:20:52,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:52,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T00:20:52,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:52,440 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:52,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/24955d4aab9342ff89917a96bc4fdd9e is 50, key is test_row_0/A:col10/1733617251446/Put/seqid=0 2024-12-08T00:20:52,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742174_1350 (size=12151) 2024-12-08T00:20:52,477 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/24955d4aab9342ff89917a96bc4fdd9e 2024-12-08T00:20:52,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/66f6069aa95149bca6e6d2a4403cfc2b is 50, key is test_row_0/B:col10/1733617251446/Put/seqid=0 2024-12-08T00:20:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742175_1351 (size=12151) 2024-12-08T00:20:52,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:52,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing
2024-12-08T00:20:52,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617312575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617312575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617312576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617312577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617312679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617312679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617312680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617312679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617312883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617312883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617312883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:52,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617312883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:52,924 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/66f6069aa95149bca6e6d2a4403cfc2b 2024-12-08T00:20:52,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/588f8663d6334128976e3a8b2f79083f is 50, key is test_row_0/C:col10/1733617251446/Put/seqid=0 2024-12-08T00:20:52,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742176_1352 (size=12151) 2024-12-08T00:20:53,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617313185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617313186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617313187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617313188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,338 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/588f8663d6334128976e3a8b2f79083f 2024-12-08T00:20:53,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/24955d4aab9342ff89917a96bc4fdd9e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e 2024-12-08T00:20:53,355 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e, entries=150, sequenceid=169, filesize=11.9 K 2024-12-08T00:20:53,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/66f6069aa95149bca6e6d2a4403cfc2b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b 2024-12-08T00:20:53,360 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b, entries=150, sequenceid=169, filesize=11.9 K 2024-12-08T00:20:53,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/588f8663d6334128976e3a8b2f79083f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f
2024-12-08T00:20:53,365 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f, entries=150, sequenceid=169, filesize=11.9 K 2024-12-08T00:20:53,366 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d6019a516c33d3d08395be7add424e27 in 926ms, sequenceid=169, compaction requested=true 2024-12-08T00:20:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-08T00:20:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-08T00:20:53,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-08T00:20:53,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3770 sec 2024-12-08T00:20:53,371 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 3.3810 sec 2024-12-08T00:20:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:53,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:20:53,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C
2024-12-08T00:20:53,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:53,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/3410c0a22dc9430fa037951afe82813c is 50, key is test_row_0/A:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:53,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742177_1353 (size=14541) 2024-12-08T00:20:53,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617313701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617313702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617313704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617313704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617313807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617313807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617313808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:53,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:53,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617313808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617314011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617314012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617314012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617314012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T00:20:54,095 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-08T00:20:54,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-08T00:20:54,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:54,098 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:54,098 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:54,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:54,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/3410c0a22dc9430fa037951afe82813c 2024-12-08T00:20:54,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 is 50, key is test_row_0/B:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:54,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742178_1354 (size=12151) 2024-12-08T00:20:54,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:54,250 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T00:20:54,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:54,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617314309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,312 DEBUG [Thread-1414 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:54,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617314314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617314316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617314317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617314317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:54,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T00:20:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 2024-12-08T00:20:54,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/5f57061a4a3f4c2bb6152dea58552a7b is 50, key is test_row_0/C:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742179_1355 (size=12151) 2024-12-08T00:20:54,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/5f57061a4a3f4c2bb6152dea58552a7b 2024-12-08T00:20:54,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/3410c0a22dc9430fa037951afe82813c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c 2024-12-08T00:20:54,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c, entries=200, sequenceid=193, filesize=14.2 K 2024-12-08T00:20:54,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 2024-12-08T00:20:54,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T00:20:54,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:54,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:54,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
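[annotation] The client-side view of the rejection storm above appears in the RpcRetryingCallerImpl entry ("Call exception, tries=6, retries=16 ..."): HTable.put() keeps retrying while the region server answers with RegionTooBusyException. Below is a minimal, hedged sketch of a writer that exercises this path. The table name, row key, column family B and qualifier col10 come from the log; the retry/pause values, the written value, and the class name are illustrative assumptions, not taken from the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side knobs that bound the internal retry loop (assumed values,
    // not read from this log):
    conf.setInt("hbase.client.retries.number", 16);
    conf.setInt("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier taken from the log (test_row_0 / B:col10); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Each server-side RegionTooBusyException is retried inside the client
        // (RpcRetryingCallerImpl); those retries are the repeated WARN/DEBUG
        // entries on the region server above.
        table.put(put);
      } catch (IOException e) {
        // Only once the retry/timeout budget is spent does the failure reach the
        // caller, typically as a retries-exhausted exception wrapping
        // RegionTooBusyException.
        System.err.println("write gave up after retries: " + e.getMessage());
      }
    }
  }
}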
2024-12-08T00:20:54,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
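[annotation] The FLUSH procedure threaded through this stretch (pid=79 with subprocedure pid=80) is what a client-initiated table flush becomes on the master, and the log shows the master re-dispatching FlushRegionCallable each time the region server answers "already flushing". A hedged sketch of issuing such a flush through the public Admin API; apart from Admin#flush and the table name, the surrounding boilerplate is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // On the master this becomes a FlushTableProcedure with one
      // FlushRegionProcedure per region; if a region reports "already flushing",
      // the remote callable fails and the master re-dispatches it, which is the
      // pid=80 retry pattern visible above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}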
2024-12-08T00:20:54,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0, entries=150, sequenceid=193, filesize=11.9 K 2024-12-08T00:20:54,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/5f57061a4a3f4c2bb6152dea58552a7b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b 2024-12-08T00:20:54,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b, entries=150, sequenceid=193, filesize=11.9 K 2024-12-08T00:20:54,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d6019a516c33d3d08395be7add424e27 in 877ms, sequenceid=193, compaction requested=true 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:54,568 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:54,568 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:54,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:54,570 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:54,570 DEBUG 
[RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53592 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:54,570 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:54,570 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:54,570 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,570 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:54,571 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/61a52732452d4f43aedc696c86ad98a2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=52.3 K 2024-12-08T00:20:54,571 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/7b3b45f443024ba5b33aac05cf8695c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=47.7 K 2024-12-08T00:20:54,571 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b3b45f443024ba5b33aac05cf8695c6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:54,572 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
61a52732452d4f43aedc696c86ad98a2, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:54,572 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b1f61414c39d460f9fe0a139abc27a56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733617250149 2024-12-08T00:20:54,573 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0cdcb07ff444898bde7b91d39610451, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733617250149 2024-12-08T00:20:54,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 66f6069aa95149bca6e6d2a4403cfc2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733617251441 2024-12-08T00:20:54,573 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24955d4aab9342ff89917a96bc4fdd9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733617251441 2024-12-08T00:20:54,574 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ecd9fbea2f41c7b9bc496ce7f679e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:54,574 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3410c0a22dc9430fa037951afe82813c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:54,595 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#296 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:54,596 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/aaab20cb8af44755a3fd8955faf26d83 is 50, key is test_row_0/B:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:54,598 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#297 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:54,599 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/9cda4a7bba8f458d8001edbe66f310e5 is 50, key is test_row_0/A:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:54,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742180_1356 (size=12595) 2024-12-08T00:20:54,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742181_1357 (size=12595) 2024-12-08T00:20:54,607 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/aaab20cb8af44755a3fd8955faf26d83 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/aaab20cb8af44755a3fd8955faf26d83 2024-12-08T00:20:54,611 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into aaab20cb8af44755a3fd8955faf26d83(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:54,612 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:54,612 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=12, startTime=1733617254568; duration=0sec 2024-12-08T00:20:54,612 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:54,612 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:20:54,612 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:20:54,613 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:20:54,613 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:54,613 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:54,613 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/08d0026358a14885a2f7eae04087dda3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=47.7 K 2024-12-08T00:20:54,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 08d0026358a14885a2f7eae04087dda3, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733617248992 2024-12-08T00:20:54,614 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b421747bc48d46db94be9d09c8db7b9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733617250149 2024-12-08T00:20:54,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 588f8663d6334128976e3a8b2f79083f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733617251441 2024-12-08T00:20:54,615 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f57061a4a3f4c2bb6152dea58552a7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:54,627 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#298 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:54,628 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/99983bc1d2e94ba68b2d54c8fa63cf34 is 50, key is test_row_0/C:col10/1733617252575/Put/seqid=0 2024-12-08T00:20:54,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742182_1358 (size=12595) 2024-12-08T00:20:54,645 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/99983bc1d2e94ba68b2d54c8fa63cf34 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99983bc1d2e94ba68b2d54c8fa63cf34 2024-12-08T00:20:54,651 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 99983bc1d2e94ba68b2d54c8fa63cf34(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:54,651 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:54,651 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=12, startTime=1733617254568; duration=0sec 2024-12-08T00:20:54,651 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:54,651 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:54,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:54,710 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T00:20:54,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
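The entries above trace one complete flush-then-compact cycle for region d6019a516c33d3d08395be7add424e27: the memstore flush commits new store files for A, B, and C, CompactSplit marks each store, and ExploringCompactionPolicy selects all four eligible files per store for a minor compaction. As a hedged illustration only (not part of this test output), the Java sketch below shows how the same flush and compaction could be requested through the public Admin API; the table name TestAcidGuarantees is taken from the log, while the class name and the reliance on default client configuration are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml is on the classpath; otherwise set quorum/znode properties here.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table (a FlushTableProcedure,
            // comparable to pid=79 above, which fans out FlushRegionCallable work to region servers).
            admin.flush(table);
            // Requests a compaction; the region server's CompactSplit pool then selects
            // store files much like the ExploringCompactionPolicy entries above show.
            admin.compact(table);
        }
    }
}

Both calls are asynchronous from the caller's point of view: the log lines that follow are the region server actually executing the queued flush and compaction work.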
2024-12-08T00:20:54,710 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:54,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d33cccc8338e4a22ba36052af6eace73 is 50, key is test_row_0/A:col10/1733617253703/Put/seqid=0 2024-12-08T00:20:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742183_1359 (size=12151) 2024-12-08T00:20:54,721 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d33cccc8338e4a22ba36052af6eace73 2024-12-08T00:20:54,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2a4d524e9bd04e98924654841d693e55 is 50, key is test_row_0/B:col10/1733617253703/Put/seqid=0 2024-12-08T00:20:54,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742184_1360 (size=12151) 2024-12-08T00:20:54,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:54,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
as already flushing 2024-12-08T00:20:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617314874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617314874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617314874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617314875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617314978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617314978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617314978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617314979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,010 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/9cda4a7bba8f458d8001edbe66f310e5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9cda4a7bba8f458d8001edbe66f310e5 2024-12-08T00:20:55,014 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 9cda4a7bba8f458d8001edbe66f310e5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
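The repeated WARN/DEBUG pairs above show RSRpcServices rejecting client Mutate calls with RegionTooBusyException while the region's memstore is over its 512.0 K limit and a flush is still in flight. The sketch below is a hedged illustration of one way a caller could back off and retry such writes; note that the stock HBase client already retries this exception internally and may surface it wrapped, depending on retry settings, and every name here other than the HBase classes themselves is hypothetical.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    // Illustrative only: writes one cell, backing off when the region reports it is too busy.
    static void putWithBackoff(Connection conn, TableName table) throws Exception {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        long sleepMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
            try (Table t = conn.getTable(table)) {
                t.put(put);
                return; // write accepted
            } catch (RegionTooBusyException e) {
                // Memstore over limit (as in the WARN entries above); wait for the flush
                // to drain the memstore, then try again with a larger delay.
                Thread.sleep(sleepMs);
                sleepMs *= 2;
            }
        }
        throw new RuntimeException("region still busy after retries");
    }
}

In this test the pressure clears on its own once the in-flight flush (pid=80) completes and the memstore drops back under the limit, which is why the later Mutate calls in the log eventually succeed.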
2024-12-08T00:20:55,014 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:55,014 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=12, startTime=1733617254568; duration=0sec 2024-12-08T00:20:55,014 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:55,014 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:55,140 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2a4d524e9bd04e98924654841d693e55 2024-12-08T00:20:55,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/95e8ad8f667642559819d616434f27c2 is 50, key is test_row_0/C:col10/1733617253703/Put/seqid=0 2024-12-08T00:20:55,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742185_1361 (size=12151) 2024-12-08T00:20:55,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617315181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617315181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617315181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617315181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:55,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617315483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617315484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617315485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617315486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:55,572 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/95e8ad8f667642559819d616434f27c2 2024-12-08T00:20:55,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/d33cccc8338e4a22ba36052af6eace73 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73 2024-12-08T00:20:55,588 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T00:20:55,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2a4d524e9bd04e98924654841d693e55 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55 2024-12-08T00:20:55,592 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T00:20:55,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/95e8ad8f667642559819d616434f27c2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2 2024-12-08T00:20:55,597 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T00:20:55,598 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d6019a516c33d3d08395be7add424e27 in 887ms, sequenceid=207, compaction requested=false 2024-12-08T00:20:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:55,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-08T00:20:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-08T00:20:55,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-08T00:20:55,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5010 sec 2024-12-08T00:20:55,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.5050 sec 2024-12-08T00:20:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:55,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:55,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/2e6fe6e4f169405fb94035d02bc62ecf is 50, key is test_row_0/A:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:56,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742186_1362 (size=12151) 2024-12-08T00:20:56,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617315999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617316000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617316003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617316003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617316104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617316104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617316106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617316107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T00:20:56,202 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-08T00:20:56,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:56,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-08T00:20:56,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:56,206 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:56,206 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:56,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:56,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617316306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617316307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617316309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617316309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,358 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:56,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:56,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/2e6fe6e4f169405fb94035d02bc62ecf 2024-12-08T00:20:56,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/c4c1dc4c1dec4c588dba85853cbb1db5 is 50, key is test_row_0/B:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:56,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742187_1363 (size=12151) 2024-12-08T00:20:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:56,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:56,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:56,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617316609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617316610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617316611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617316613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:56,820 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:56,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:56,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:56,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/c4c1dc4c1dec4c588dba85853cbb1db5 2024-12-08T00:20:56,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/f3f55c5a214f44548bd1428ace286a41 is 50, key is test_row_0/C:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:56,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742188_1364 (size=12151) 2024-12-08T00:20:56,973 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:56,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:56,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:56,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:56,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:56,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:57,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617317111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:57,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617317116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:57,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617317117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:57,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617317118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,126 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:57,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:57,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:57,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:57,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:57,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:57,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:57,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/f3f55c5a214f44548bd1428ace286a41 2024-12-08T00:20:57,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/2e6fe6e4f169405fb94035d02bc62ecf as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf 2024-12-08T00:20:57,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf, entries=150, sequenceid=233, filesize=11.9 K 2024-12-08T00:20:57,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/c4c1dc4c1dec4c588dba85853cbb1db5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5 2024-12-08T00:20:57,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5, entries=150, sequenceid=233, filesize=11.9 K 2024-12-08T00:20:57,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/f3f55c5a214f44548bd1428ace286a41 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41 2024-12-08T00:20:57,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41, entries=150, sequenceid=233, filesize=11.9 K 2024-12-08T00:20:57,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d6019a516c33d3d08395be7add424e27 in 1271ms, sequenceid=233, compaction requested=true 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:57,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:57,260 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:57,260 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:57,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:57,261 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:57,261 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:57,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:57,261 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:57,261 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:57,262 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9cda4a7bba8f458d8001edbe66f310e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.0 K 2024-12-08T00:20:57,262 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/aaab20cb8af44755a3fd8955faf26d83, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.0 K 2024-12-08T00:20:57,262 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cda4a7bba8f458d8001edbe66f310e5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:57,262 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting aaab20cb8af44755a3fd8955faf26d83, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:57,262 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a4d524e9bd04e98924654841d693e55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733617253699 2024-12-08T00:20:57,263 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d33cccc8338e4a22ba36052af6eace73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733617253699 2024-12-08T00:20:57,263 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c4c1dc4c1dec4c588dba85853cbb1db5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:57,263 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6fe6e4f169405fb94035d02bc62ecf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:57,271 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:57,272 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/5d5a8f36f1c9412eb129776a6568eee3 is 50, key is test_row_0/A:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:57,276 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:57,277 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/8c716ef536544043bd2d9dc813d9f663 is 50, key is test_row_0/B:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:57,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:57,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T00:20:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:57,280 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:57,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:57,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/35da137b21bf4fdda3d656641c6407ac is 50, key is test_row_0/A:col10/1733617256002/Put/seqid=0 2024-12-08T00:20:57,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:57,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742189_1365 (size=12697) 2024-12-08T00:20:57,317 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/5d5a8f36f1c9412eb129776a6568eee3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/5d5a8f36f1c9412eb129776a6568eee3 2024-12-08T00:20:57,323 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 5d5a8f36f1c9412eb129776a6568eee3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:57,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:57,323 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617257260; duration=0sec 2024-12-08T00:20:57,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:57,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:57,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:57,325 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:57,325 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:57,325 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:57,325 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99983bc1d2e94ba68b2d54c8fa63cf34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.0 K 2024-12-08T00:20:57,325 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99983bc1d2e94ba68b2d54c8fa63cf34, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733617252571 2024-12-08T00:20:57,326 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95e8ad8f667642559819d616434f27c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733617253699 2024-12-08T00:20:57,326 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3f55c5a214f44548bd1428ace286a41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:57,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42385 is added to blk_1073742190_1366 (size=12697) 2024-12-08T00:20:57,331 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/8c716ef536544043bd2d9dc813d9f663 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/8c716ef536544043bd2d9dc813d9f663 2024-12-08T00:20:57,338 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 8c716ef536544043bd2d9dc813d9f663(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:57,338 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:57,338 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617257260; duration=0sec 2024-12-08T00:20:57,338 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:57,338 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:20:57,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742191_1367 (size=12151) 2024-12-08T00:20:57,354 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/35da137b21bf4fdda3d656641c6407ac 2024-12-08T00:20:57,357 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#308 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:57,358 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/930885ef78594cf29cec14510ce11680 is 50, key is test_row_0/C:col10/1733617254874/Put/seqid=0 2024-12-08T00:20:57,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/250fef7314a14a15b909605a54b77a9f is 50, key is test_row_0/B:col10/1733617256002/Put/seqid=0 2024-12-08T00:20:57,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742192_1368 (size=12697) 2024-12-08T00:20:57,399 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/930885ef78594cf29cec14510ce11680 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/930885ef78594cf29cec14510ce11680 2024-12-08T00:20:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742193_1369 (size=12151) 2024-12-08T00:20:57,404 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/250fef7314a14a15b909605a54b77a9f 2024-12-08T00:20:57,406 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 930885ef78594cf29cec14510ce11680(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:57,406 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:57,406 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617257260; duration=0sec 2024-12-08T00:20:57,407 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:57,407 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:57,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/782bd7a5999f4f9f893931ae9b6752e3 is 50, key is test_row_0/C:col10/1733617256002/Put/seqid=0 2024-12-08T00:20:57,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742194_1370 (size=12151) 2024-12-08T00:20:57,836 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/782bd7a5999f4f9f893931ae9b6752e3 2024-12-08T00:20:57,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/35da137b21bf4fdda3d656641c6407ac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac 2024-12-08T00:20:57,848 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac, entries=150, sequenceid=244, filesize=11.9 K 2024-12-08T00:20:57,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/250fef7314a14a15b909605a54b77a9f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f 2024-12-08T00:20:57,865 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f, entries=150, sequenceid=244, filesize=11.9 K 2024-12-08T00:20:57,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/782bd7a5999f4f9f893931ae9b6752e3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3 2024-12-08T00:20:57,874 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3, entries=150, sequenceid=244, filesize=11.9 K 2024-12-08T00:20:57,875 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for d6019a516c33d3d08395be7add424e27 in 595ms, sequenceid=244, compaction requested=false 2024-12-08T00:20:57,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:57,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:57,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-08T00:20:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-08T00:20:57,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-08T00:20:57,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6700 sec 2024-12-08T00:20:57,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.6750 sec 2024-12-08T00:20:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:58,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:58,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/9e8ef21bff594ac0b45c953749f41688 is 50, key is test_row_0/A:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742195_1371 (size=14741) 2024-12-08T00:20:58,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/9e8ef21bff594ac0b45c953749f41688 2024-12-08T00:20:58,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/6d7d4aadb27d46e89cf7c11032d08413 is 50, key is test_row_0/B:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617318157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617318156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617318158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617318158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742196_1372 (size=12301) 2024-12-08T00:20:58,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/6d7d4aadb27d46e89cf7c11032d08413 2024-12-08T00:20:58,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 is 50, key is test_row_0/C:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742197_1373 (size=12301) 2024-12-08T00:20:58,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617318261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617318261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617318261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617318262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T00:20:58,310 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-08T00:20:58,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:58,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-08T00:20:58,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T00:20:58,313 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:58,313 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:58,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:58,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37778 deadline: 1733617318335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,337 DEBUG [Thread-1414 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:20:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 
2024-12-08T00:20:58,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617318463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617318464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,466 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617318464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617318464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T00:20:58,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:58,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:58,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:58,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:20:58,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:58,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:58,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 2024-12-08T00:20:58,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/9e8ef21bff594ac0b45c953749f41688 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688 2024-12-08T00:20:58,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688, entries=200, sequenceid=261, filesize=14.4 K 2024-12-08T00:20:58,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/6d7d4aadb27d46e89cf7c11032d08413 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413 2024-12-08T00:20:58,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413, entries=150, sequenceid=261, filesize=12.0 K 2024-12-08T00:20:58,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 2024-12-08T00:20:58,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91, entries=150, sequenceid=261, filesize=12.0 K 2024-12-08T00:20:58,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for d6019a516c33d3d08395be7add424e27 in 464ms, sequenceid=261, compaction requested=true 2024-12-08T00:20:58,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:20:58,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:58,597 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:58,597 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:20:58,598 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:58,598 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:58,598 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/5d5a8f36f1c9412eb129776a6568eee3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=38.7 K 2024-12-08T00:20:58,598 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/8c716ef536544043bd2d9dc813d9f663, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.3 K 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d5a8f36f1c9412eb129776a6568eee3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:58,598 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c716ef536544043bd2d9dc813d9f663, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:58,599 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35da137b21bf4fdda3d656641c6407ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733617255992 2024-12-08T00:20:58,599 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 250fef7314a14a15b909605a54b77a9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733617255992 2024-12-08T00:20:58,599 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e8ef21bff594ac0b45c953749f41688, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:20:58,599 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d7d4aadb27d46e89cf7c11032d08413, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:20:58,611 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#314 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:58,612 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/497ce66d2930495ea1583b49dc8ed53f is 50, key is test_row_0/B:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T00:20:58,615 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:58,615 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/26f7ac88ca4a49228861c1744e8c4867 is 50, key is test_row_0/A:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742198_1374 (size=12949) 2024-12-08T00:20:58,619 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:58,620 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:58,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742199_1375 (size=12949) 2024-12-08T00:20:58,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/929f7de467374ac3ba15636d9865842e is 50, key is test_row_0/A:col10/1733617258157/Put/seqid=0 2024-12-08T00:20:58,627 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/497ce66d2930495ea1583b49dc8ed53f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/497ce66d2930495ea1583b49dc8ed53f 2024-12-08T00:20:58,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742200_1376 (size=12301) 2024-12-08T00:20:58,632 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 497ce66d2930495ea1583b49dc8ed53f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:20:58,633 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:58,633 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617258597; duration=0sec 2024-12-08T00:20:58,633 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:20:58,633 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:20:58,633 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/929f7de467374ac3ba15636d9865842e 2024-12-08T00:20:58,633 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:20:58,634 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:20:58,634 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:20:58,635 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:20:58,635 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/930885ef78594cf29cec14510ce11680, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.3 K 2024-12-08T00:20:58,635 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 930885ef78594cf29cec14510ce11680, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733617254845 2024-12-08T00:20:58,636 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 782bd7a5999f4f9f893931ae9b6752e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733617255992 2024-12-08T00:20:58,637 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e1a081aa9d1e4da9bacc9a3b6aacdd91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:20:58,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/25495306714d4c9bad92e5cf2311c56b is 50, key is test_row_0/B:col10/1733617258157/Put/seqid=0 2024-12-08T00:20:58,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742201_1377 (size=12301) 2024-12-08T00:20:58,646 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/25495306714d4c9bad92e5cf2311c56b 2024-12-08T00:20:58,646 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#318 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:20:58,646 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/7974958caad34e5e8fc75534541d0a8e is 50, key is test_row_0/C:col10/1733617258131/Put/seqid=0 2024-12-08T00:20:58,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8b50c3b854544d2e88f33c39c7b032ad is 50, key is test_row_0/C:col10/1733617258157/Put/seqid=0 2024-12-08T00:20:58,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742202_1378 (size=12949) 2024-12-08T00:20:58,679 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/7974958caad34e5e8fc75534541d0a8e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/7974958caad34e5e8fc75534541d0a8e 2024-12-08T00:20:58,685 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 7974958caad34e5e8fc75534541d0a8e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:58,685 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:58,685 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617258597; duration=0sec 2024-12-08T00:20:58,685 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:58,685 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:20:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742203_1379 (size=12301) 2024-12-08T00:20:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:58,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:58,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617318779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617318779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617318779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617318780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617318883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617318883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617318883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:58,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617318884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:58,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T00:20:59,029 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/26f7ac88ca4a49228861c1744e8c4867 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/26f7ac88ca4a49228861c1744e8c4867 2024-12-08T00:20:59,035 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 26f7ac88ca4a49228861c1744e8c4867(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:20:59,035 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:59,035 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617258597; duration=0sec 2024-12-08T00:20:59,035 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:20:59,035 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:20:59,087 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8b50c3b854544d2e88f33c39c7b032ad 2024-12-08T00:20:59,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617319086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617319086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617319086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617319086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/929f7de467374ac3ba15636d9865842e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e 2024-12-08T00:20:59,100 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e, entries=150, sequenceid=284, filesize=12.0 K 2024-12-08T00:20:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/25495306714d4c9bad92e5cf2311c56b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b 2024-12-08T00:20:59,106 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b, entries=150, sequenceid=284, filesize=12.0 K 2024-12-08T00:20:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8b50c3b854544d2e88f33c39c7b032ad as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad 2024-12-08T00:20:59,113 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad, entries=150, sequenceid=284, filesize=12.0 K 2024-12-08T00:20:59,115 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for d6019a516c33d3d08395be7add424e27 in 495ms, sequenceid=284, compaction requested=false 2024-12-08T00:20:59,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:20:59,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-08T00:20:59,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-08T00:20:59,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-08T00:20:59,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 804 msec 2024-12-08T00:20:59,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 810 msec 2024-12-08T00:20:59,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:20:59,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:20:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:20:59,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/390a39d6fe814533ac6bd5fa121388c7 is 50, key is test_row_0/A:col10/1733617259392/Put/seqid=0 2024-12-08T00:20:59,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617319410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617319411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T00:20:59,416 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-08T00:20:59,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617319413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617319413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:20:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-08T00:20:59,419 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:20:59,419 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:20:59,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:20:59,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:20:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742204_1380 (size=12301) 2024-12-08T00:20:59,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/390a39d6fe814533ac6bd5fa121388c7 2024-12-08T00:20:59,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/a7956b0a92764ad3a29d5450635a5d82 is 50, key is test_row_0/B:col10/1733617259392/Put/seqid=0 2024-12-08T00:20:59,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742205_1381 (size=12301) 2024-12-08T00:20:59,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617319514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617319514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617319517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617319517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:20:59,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:20:59,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:59,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617319717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617319718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617319720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:20:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:20:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617319721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:20:59,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:59,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/a7956b0a92764ad3a29d5450635a5d82 2024-12-08T00:20:59,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/6249f95c514044b6baeceade7bc2eb88 is 50, key is test_row_0/C:col10/1733617259392/Put/seqid=0 2024-12-08T00:20:59,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:20:59,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:20:59,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:20:59,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:20:59,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:20:59,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742206_1382 (size=12301) 2024-12-08T00:21:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:21:00,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617320020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617320021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617320024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617320025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:21:00,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:00,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:21:00,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:00,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:00,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/6249f95c514044b6baeceade7bc2eb88 2024-12-08T00:21:00,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/390a39d6fe814533ac6bd5fa121388c7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7 2024-12-08T00:21:00,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7, entries=150, sequenceid=304, filesize=12.0 K 2024-12-08T00:21:00,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/a7956b0a92764ad3a29d5450635a5d82 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82 2024-12-08T00:21:00,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82, entries=150, 
sequenceid=304, filesize=12.0 K 2024-12-08T00:21:00,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/6249f95c514044b6baeceade7bc2eb88 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88 2024-12-08T00:21:00,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88, entries=150, sequenceid=304, filesize=12.0 K 2024-12-08T00:21:00,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for d6019a516c33d3d08395be7add424e27 in 931ms, sequenceid=304, compaction requested=true 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:00,325 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:00,325 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:00,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:00,326 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:00,326 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:00,326 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 
d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:21:00,326 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:21:00,326 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,326 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/497ce66d2930495ea1583b49dc8ed53f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.7 K 2024-12-08T00:21:00,326 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,326 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/26f7ac88ca4a49228861c1744e8c4867, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.7 K 2024-12-08T00:21:00,327 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 497ce66d2930495ea1583b49dc8ed53f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:21:00,327 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26f7ac88ca4a49228861c1744e8c4867, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:21:00,327 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 25495306714d4c9bad92e5cf2311c56b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733617258152 2024-12-08T00:21:00,328 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 929f7de467374ac3ba15636d9865842e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733617258152 2024-12-08T00:21:00,328 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a7956b0a92764ad3a29d5450635a5d82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:00,328 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 390a39d6fe814533ac6bd5fa121388c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:00,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T00:21:00,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,335 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T00:21:00,338 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#323 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:00,339 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/137e9fe51dd54effb4e5bfb591132734 is 50, key is test_row_0/B:col10/1733617259392/Put/seqid=0 2024-12-08T00:21:00,346 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:00,347 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/503876d1d02146418441816e8ed99867 is 50, key is test_row_0/A:col10/1733617259392/Put/seqid=0 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:00,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742207_1383 (size=13051) 2024-12-08T00:21:00,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/41239c922584427f8f45e1de478ea399 is 50, key is test_row_0/A:col10/1733617259406/Put/seqid=0 2024-12-08T00:21:00,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742208_1384 (size=13051) 2024-12-08T00:21:00,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742209_1385 (size=12301) 2024-12-08T00:21:00,389 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/41239c922584427f8f45e1de478ea399 2024-12-08T00:21:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/e516bd22239840c38b5aab36c6d989ab is 50, key is test_row_0/B:col10/1733617259406/Put/seqid=0 2024-12-08T00:21:00,400 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742210_1386 (size=12301) 2024-12-08T00:21:00,401 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/e516bd22239840c38b5aab36c6d989ab 2024-12-08T00:21:00,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/bed8721c6fc247d090a8ed4dd66517f3 is 50, key is test_row_0/C:col10/1733617259406/Put/seqid=0 2024-12-08T00:21:00,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742211_1387 (size=12301) 2024-12-08T00:21:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:21:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:00,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:00,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617320536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617320537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617320538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617320538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617320639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617320641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617320642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617320642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,763 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/137e9fe51dd54effb4e5bfb591132734 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/137e9fe51dd54effb4e5bfb591132734 2024-12-08T00:21:00,767 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 137e9fe51dd54effb4e5bfb591132734(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:00,767 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:00,767 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617260325; duration=0sec 2024-12-08T00:21:00,767 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:00,767 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:21:00,767 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:00,768 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:00,768 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:21:00,768 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:00,768 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/7974958caad34e5e8fc75534541d0a8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.7 K 2024-12-08T00:21:00,769 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7974958caad34e5e8fc75534541d0a8e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733617258131 2024-12-08T00:21:00,769 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b50c3b854544d2e88f33c39c7b032ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733617258152 2024-12-08T00:21:00,769 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6249f95c514044b6baeceade7bc2eb88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:00,776 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d6019a516c33d3d08395be7add424e27#C#compaction#328 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:00,777 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/22a56dea99c149b6bb12121413a36dc0 is 50, key is test_row_0/C:col10/1733617259392/Put/seqid=0 2024-12-08T00:21:00,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742212_1388 (size=13051) 2024-12-08T00:21:00,798 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/503876d1d02146418441816e8ed99867 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/503876d1d02146418441816e8ed99867 2024-12-08T00:21:00,802 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 503876d1d02146418441816e8ed99867(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:00,802 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:00,802 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617260325; duration=0sec 2024-12-08T00:21:00,803 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:00,803 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:21:00,826 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/bed8721c6fc247d090a8ed4dd66517f3 2024-12-08T00:21:00,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/41239c922584427f8f45e1de478ea399 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399 2024-12-08T00:21:00,834 INFO 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399, entries=150, sequenceid=324, filesize=12.0 K 2024-12-08T00:21:00,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/e516bd22239840c38b5aab36c6d989ab as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab 2024-12-08T00:21:00,838 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab, entries=150, sequenceid=324, filesize=12.0 K 2024-12-08T00:21:00,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/bed8721c6fc247d090a8ed4dd66517f3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3 2024-12-08T00:21:00,843 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3, entries=150, sequenceid=324, filesize=12.0 K 2024-12-08T00:21:00,844 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d6019a516c33d3d08395be7add424e27 in 508ms, sequenceid=324, compaction requested=false 2024-12-08T00:21:00,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:00,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:00,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-08T00:21:00,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-08T00:21:00,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:00,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T00:21:00,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-08T00:21:00,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4250 sec 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:00,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:00,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.4290 sec 2024-12-08T00:21:00,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e957545916514671b3f602a34c31b850 is 50, key is test_row_0/A:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:00,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742213_1389 (size=12301) 2024-12-08T00:21:00,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617320891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617320890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617320892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617320892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617320995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617320995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617320995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:00,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617320996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,186 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/22a56dea99c149b6bb12121413a36dc0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/22a56dea99c149b6bb12121413a36dc0 2024-12-08T00:21:01,191 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 22a56dea99c149b6bb12121413a36dc0(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:01,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:01,191 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617260325; duration=0sec 2024-12-08T00:21:01,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:01,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:21:01,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617321197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617321198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617321198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617321209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e957545916514671b3f602a34c31b850 2024-12-08T00:21:01,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f9a0189307944c47a5908ec8f75d0f4f is 50, key is test_row_0/B:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:01,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742214_1390 (size=12301) 2024-12-08T00:21:01,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617321501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617321502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617321503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:01,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617321512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T00:21:01,524 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-08T00:21:01,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:01,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-08T00:21:01,527 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:01,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:01,527 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:01,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:01,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:01,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T00:21:01,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:01,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:01,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:01,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:01,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:01,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:01,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f9a0189307944c47a5908ec8f75d0f4f 2024-12-08T00:21:01,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/99cebb88f8b2474789359b78f51fdac4 is 50, key is test_row_0/C:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742215_1391 (size=12301) 2024-12-08T00:21:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:01,830 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T00:21:01,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:01,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:01,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:01,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:01,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:01,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:01,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T00:21:01,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:01,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:01,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:01,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:01,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:02,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617322003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617322006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:02,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617322008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617322016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/99cebb88f8b2474789359b78f51fdac4 2024-12-08T00:21:02,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e957545916514671b3f602a34c31b850 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850 2024-12-08T00:21:02,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850, entries=150, sequenceid=344, filesize=12.0 K 2024-12-08T00:21:02,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/f9a0189307944c47a5908ec8f75d0f4f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f 2024-12-08T00:21:02,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:02,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T00:21:02,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:02,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:02,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:02,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:02,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:02,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f, entries=150, sequenceid=344, filesize=12.0 K 2024-12-08T00:21:02,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/99cebb88f8b2474789359b78f51fdac4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4 2024-12-08T00:21:02,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4, entries=150, sequenceid=344, filesize=12.0 K 2024-12-08T00:21:02,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for d6019a516c33d3d08395be7add424e27 in 1298ms, sequenceid=344, compaction requested=true 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:02,144 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:02,144 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:02,145 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:02,145 DEBUG 
[RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:02,145 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:21:02,145 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:21:02,145 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:02,145 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:02,145 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/503876d1d02146418441816e8ed99867, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.8 K 2024-12-08T00:21:02,145 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/137e9fe51dd54effb4e5bfb591132734, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.8 K 2024-12-08T00:21:02,146 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 503876d1d02146418441816e8ed99867, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:02,146 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 137e9fe51dd54effb4e5bfb591132734, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:02,146 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e516bd22239840c38b5aab36c6d989ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=324, earliestPutTs=1733617259406 2024-12-08T00:21:02,146 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41239c922584427f8f45e1de478ea399, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733617259406 2024-12-08T00:21:02,147 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f9a0189307944c47a5908ec8f75d0f4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:02,147 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e957545916514671b3f602a34c31b850, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:02,158 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#332 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:02,159 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/777ae3b34a864ae1bef349b15f522ace is 50, key is test_row_0/B:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:02,160 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#333 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:02,161 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a29ef0fd383d424f951d5a642f312055 is 50, key is test_row_0/A:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:02,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742216_1392 (size=13153) 2024-12-08T00:21:02,199 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/777ae3b34a864ae1bef349b15f522ace as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/777ae3b34a864ae1bef349b15f522ace 2024-12-08T00:21:02,204 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 777ae3b34a864ae1bef349b15f522ace(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:02,204 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:02,204 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617262144; duration=0sec 2024-12-08T00:21:02,204 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:02,204 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:21:02,204 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:02,206 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:02,206 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:21:02,206 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:02,206 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/22a56dea99c149b6bb12121413a36dc0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.8 K 2024-12-08T00:21:02,206 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a56dea99c149b6bb12121413a36dc0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733617258775 2024-12-08T00:21:02,206 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bed8721c6fc247d090a8ed4dd66517f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733617259406 2024-12-08T00:21:02,207 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 99cebb88f8b2474789359b78f51fdac4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:02,213 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d6019a516c33d3d08395be7add424e27#C#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:02,214 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/70c28185d1be46b1b4ffc446c1451b74 is 50, key is test_row_0/C:col10/1733617260845/Put/seqid=0 2024-12-08T00:21:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742217_1393 (size=13153) 2024-12-08T00:21:02,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742218_1394 (size=13153) 2024-12-08T00:21:02,289 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:02,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T00:21:02,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:02,290 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:02,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a4040e34da5b49e2b0b0a75da3652218 is 50, key is test_row_0/A:col10/1733617260889/Put/seqid=0 2024-12-08T00:21:02,301 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742219_1395 (size=12301) 2024-12-08T00:21:02,623 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/70c28185d1be46b1b4ffc446c1451b74 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/70c28185d1be46b1b4ffc446c1451b74 2024-12-08T00:21:02,623 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a29ef0fd383d424f951d5a642f312055 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a29ef0fd383d424f951d5a642f312055 2024-12-08T00:21:02,629 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 70c28185d1be46b1b4ffc446c1451b74(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:02,629 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:02,629 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617262144; duration=0sec 2024-12-08T00:21:02,629 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:02,629 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:21:02,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into a29ef0fd383d424f951d5a642f312055(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:02,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:02,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617262144; duration=0sec 2024-12-08T00:21:02,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:02,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:21:02,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:02,704 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a4040e34da5b49e2b0b0a75da3652218 2024-12-08T00:21:02,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/bea0f72953d64f0ea4b498af7949299c is 50, key is test_row_0/B:col10/1733617260889/Put/seqid=0 2024-12-08T00:21:02,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742220_1396 (size=12301) 2024-12-08T00:21:02,718 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/bea0f72953d64f0ea4b498af7949299c 2024-12-08T00:21:02,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/52cc7de5802040a9b38a40290ca031ad is 50, key is test_row_0/C:col10/1733617260889/Put/seqid=0 2024-12-08T00:21:02,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742221_1397 (size=12301) 2024-12-08T00:21:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:03,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
as already flushing 2024-12-08T00:21:03,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617323052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617323052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617323053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617323053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,130 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/52cc7de5802040a9b38a40290ca031ad 2024-12-08T00:21:03,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/a4040e34da5b49e2b0b0a75da3652218 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218 2024-12-08T00:21:03,139 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218, entries=150, sequenceid=365, filesize=12.0 K 2024-12-08T00:21:03,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/bea0f72953d64f0ea4b498af7949299c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c 2024-12-08T00:21:03,143 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c, entries=150, sequenceid=365, filesize=12.0 K 2024-12-08T00:21:03,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/52cc7de5802040a9b38a40290ca031ad as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad 2024-12-08T00:21:03,147 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad, entries=150, sequenceid=365, filesize=12.0 K 2024-12-08T00:21:03,148 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d6019a516c33d3d08395be7add424e27 in 858ms, sequenceid=365, compaction requested=false 2024-12-08T00:21:03,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:03,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
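[Editor's note] The RegionTooBusyException storms above come from HRegion.checkResources(): writes are rejected while the region's memstore sits above its blocking size (the configured flush size times hbase.hregion.memstore.block.multiplier; the 512.0 K figure is just this test's deliberately small limit), and they clear once the in-flight flush completes. Below is a minimal client-side sketch of reacting to that rejection with a bounded backoff. It is illustrative only: the table name, column layout, and retry/backoff numbers are assumptions, and in a normal deployment the HBase client already retries RegionTooBusyException internally (per hbase.client.retries.number), so an explicit loop like this only makes the behaviour seen in the log concrete.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {

        // Retry a single Put when the region rejects it with RegionTooBusyException,
        // i.e. the "Over memstore limit" rejections in the log while a flush is in flight.
        // Note: the stock client normally retries this internally and may instead surface
        // a RetriesExhaustedException wrapping it; this loop is only a sketch.
        static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long backoffMs = 100; // illustrative starting backoff
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e; // give up after a bounded number of attempts
                    }
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000); // exponential backoff, capped
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 10);
            }
        }
    }
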
2024-12-08T00:21:03,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-08T00:21:03,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-08T00:21:03,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-08T00:21:03,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-12-08T00:21:03,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.6260 sec 2024-12-08T00:21:03,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:03,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:03,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:03,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/7bc6a396b4ea47d885d096f352092c38 is 50, key is test_row_0/A:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:03,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742222_1398 (size=12301) 2024-12-08T00:21:03,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617323202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617323203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617323203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617323204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617323306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617323306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617323306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617323306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617323509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617323509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617323510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617323510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/7bc6a396b4ea47d885d096f352092c38 2024-12-08T00:21:03,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ae0b843ed7964e7790463589d6c42df6 is 50, key is test_row_0/B:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:03,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742223_1399 (size=12301) 2024-12-08T00:21:03,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ae0b843ed7964e7790463589d6c42df6 2024-12-08T00:21:03,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/ccf9db63f09049e48e4af3d6cb071143 is 50, key is test_row_0/C:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742224_1400 (size=12301) 2024-12-08T00:21:03,631 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T00:21:03,631 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-08T00:21:03,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-08T00:21:03,634 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:03,634 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:03,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:03,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:03,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T00:21:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:03,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:03,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617323812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617323812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617323813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:03,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617323814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:03,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:03,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T00:21:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:03,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
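[Editor's note] The pid=89/pid=90 sequence above is the procedure-based table flush: the master's FlushTableProcedure fans out one FlushRegionProcedure per region, and when the region reports "NOT flushing ... as already flushing" the region-side callable fails with "Unable to complete flush" and the master re-dispatches it until the MemStoreFlusher's in-flight flush finishes. A minimal sketch of the client call behind the "Client=jenkins ... flush TestAcidGuarantees" lines is shown below; it assumes connection settings come from an hbase-site.xml on the classpath, and the table name matches this test only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush the table; in this log that request becomes a
                // FlushTableProcedure whose per-region subprocedure is retried while the
                // region is still busy with its own memstore-pressure flush.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
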
2024-12-08T00:21:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:04,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/ccf9db63f09049e48e4af3d6cb071143 2024-12-08T00:21:04,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/7bc6a396b4ea47d885d096f352092c38 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38 2024-12-08T00:21:04,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38, entries=150, sequenceid=386, filesize=12.0 K 2024-12-08T00:21:04,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ae0b843ed7964e7790463589d6c42df6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6 2024-12-08T00:21:04,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6, entries=150, sequenceid=386, filesize=12.0 K 2024-12-08T00:21:04,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/ccf9db63f09049e48e4af3d6cb071143 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143 2024-12-08T00:21:04,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143, entries=150, sequenceid=386, filesize=12.0 K 2024-12-08T00:21:04,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d6019a516c33d3d08395be7add424e27 in 878ms, sequenceid=386, compaction requested=true 2024-12-08T00:21:04,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:04,036 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:04,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:04,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:04,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:04,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:04,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:04,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:04,037 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:04,037 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:04,037 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:21:04,037 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
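The selection line above ("3 store files, 0 compacting, 3 eligible, 16 blocking") is the ExploringCompactionPolicy working against the region server's store-file thresholds; 3 and 16 match the stock values of hbase.hstore.compaction.min and hbase.hstore.blockingStoreFiles. A hedged configuration sketch using those standard keys; the numbers shown are the defaults, not values this test necessarily overrides:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered (default 3).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files selected into one compaction (default 10).
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Writes to a store with this many files are blocked until compaction catches up (default 16).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }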
2024-12-08T00:21:04,037 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a29ef0fd383d424f951d5a642f312055, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.9 K 2024-12-08T00:21:04,037 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a29ef0fd383d424f951d5a642f312055, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:04,038 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:04,038 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:21:04,038 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
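Each file fed into the compactions above carries bloomtype=ROW, encoding=NONE, compression=NONE; those attributes come from the column family descriptor set when the table was created. A minimal sketch of declaring family A of TestAcidGuarantees with the same attributes (boilerplate illustration only, not the test's actual table setup):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorExample {
      public static TableDescriptor descriptor() {
        ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setBloomFilterType(BloomType.ROW)               // bloomtype=ROW in the Compactor lines
            .setCompressionType(Compression.Algorithm.NONE)  // compression=NONE
            .setDataBlockEncoding(DataBlockEncoding.NONE)    // encoding=NONE
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(family)
            .build();
      }
    }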
2024-12-08T00:21:04,038 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/777ae3b34a864ae1bef349b15f522ace, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.9 K 2024-12-08T00:21:04,038 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4040e34da5b49e2b0b0a75da3652218, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733617260889 2024-12-08T00:21:04,038 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 777ae3b34a864ae1bef349b15f522ace, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:04,039 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bc6a396b4ea47d885d096f352092c38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:04,039 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bea0f72953d64f0ea4b498af7949299c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733617260889 2024-12-08T00:21:04,039 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ae0b843ed7964e7790463589d6c42df6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:04,046 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#341 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:04,046 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/70f87bb59380456abe2f06330edd91e4 is 50, key is test_row_0/A:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:04,047 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#342 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:04,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742225_1401 (size=13255) 2024-12-08T00:21:04,053 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/a24df743301645468c1133136ae7a170 is 50, key is test_row_0/B:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:04,056 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/70f87bb59380456abe2f06330edd91e4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/70f87bb59380456abe2f06330edd91e4 2024-12-08T00:21:04,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742226_1402 (size=13255) 2024-12-08T00:21:04,062 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into 70f87bb59380456abe2f06330edd91e4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:04,062 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:04,062 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617264036; duration=0sec 2024-12-08T00:21:04,062 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:04,062 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:21:04,062 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:04,063 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:04,063 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:21:04,063 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:04,063 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/70c28185d1be46b1b4ffc446c1451b74, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=36.9 K 2024-12-08T00:21:04,063 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70c28185d1be46b1b4ffc446c1451b74, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733617260536 2024-12-08T00:21:04,063 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52cc7de5802040a9b38a40290ca031ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733617260889 2024-12-08T00:21:04,064 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccf9db63f09049e48e4af3d6cb071143, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:04,070 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#343 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:04,070 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/2ad3977d3a6d412db69c4b6e3327f84d is 50, key is test_row_0/C:col10/1733617263052/Put/seqid=0 2024-12-08T00:21:04,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742227_1403 (size=13255) 2024-12-08T00:21:04,089 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/2ad3977d3a6d412db69c4b6e3327f84d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ad3977d3a6d412db69c4b6e3327f84d 2024-12-08T00:21:04,094 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into 2ad3977d3a6d412db69c4b6e3327f84d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
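The pid=89 FlushTableProcedure and its pid=90 FlushRegionCallable subprocedure that keep reappearing around these compactions correspond to a table-level flush, presumably requested by the test through the Admin API. A minimal client-side sketch of issuing such a flush, assuming only the table name seen in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; on the region server
          // this surfaces as FlushRegionCallable executions like pid=90 above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }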
2024-12-08T00:21:04,094 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:04,094 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617264037; duration=0sec 2024-12-08T00:21:04,094 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:04,094 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:21:04,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T00:21:04,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:04,097 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:04,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/f1c04660e9d542c58eb24844609260f9 is 50, key is test_row_0/A:col10/1733617263203/Put/seqid=0 2024-12-08T00:21:04,112 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742228_1404 (size=12301) 2024-12-08T00:21:04,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:04,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:04,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617324330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617324332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617324333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617324333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617324434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617324435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617324436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617324436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,463 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/a24df743301645468c1133136ae7a170 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a24df743301645468c1133136ae7a170 2024-12-08T00:21:04,468 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into a24df743301645468c1133136ae7a170(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:04,468 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:04,468 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617264037; duration=0sec 2024-12-08T00:21:04,468 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:04,468 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:21:04,513 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/f1c04660e9d542c58eb24844609260f9 2024-12-08T00:21:04,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ef257cef6b7c4030a9ee4a87eb8dbffb is 50, key is test_row_0/B:col10/1733617263203/Put/seqid=0 2024-12-08T00:21:04,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742229_1405 (size=12301) 2024-12-08T00:21:04,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617324637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617324638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617324638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617324639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:04,929 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ef257cef6b7c4030a9ee4a87eb8dbffb 2024-12-08T00:21:04,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/a98a8ac9a0c9440d84930a8903fa39f9 is 50, key is test_row_0/C:col10/1733617263203/Put/seqid=0 2024-12-08T00:21:04,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617324941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617324941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617324942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:04,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617324944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:04,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742230_1406 (size=12301) 2024-12-08T00:21:05,350 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/a98a8ac9a0c9440d84930a8903fa39f9 2024-12-08T00:21:05,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/f1c04660e9d542c58eb24844609260f9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9 2024-12-08T00:21:05,358 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9, entries=150, sequenceid=405, filesize=12.0 K 2024-12-08T00:21:05,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ef257cef6b7c4030a9ee4a87eb8dbffb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb 2024-12-08T00:21:05,362 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb, entries=150, sequenceid=405, filesize=12.0 K 2024-12-08T00:21:05,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/a98a8ac9a0c9440d84930a8903fa39f9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9 2024-12-08T00:21:05,367 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9, entries=150, sequenceid=405, filesize=12.0 K 2024-12-08T00:21:05,368 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for d6019a516c33d3d08395be7add424e27 in 1271ms, sequenceid=405, compaction requested=false 2024-12-08T00:21:05,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:05,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:05,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-08T00:21:05,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-08T00:21:05,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-08T00:21:05,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7350 sec 2024-12-08T00:21:05,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.7390 sec 2024-12-08T00:21:05,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:05,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:05,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:05,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e82f8cc0588d4785843c132501697452 is 50, key is test_row_0/A:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:05,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742231_1407 (size=14741) 2024-12-08T00:21:05,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617325460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617325461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617325461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617325462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617325563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617325564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617325565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617325566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T00:21:05,738 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-08T00:21:05,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-08T00:21:05,741 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:05,741 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:05,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:05,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617325766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617325767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617325768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617325770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:05,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e82f8cc0588d4785843c132501697452 2024-12-08T00:21:05,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/3c4dc13230dc48e18a78ce4d41e5cc8a is 50, key is test_row_0/B:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:05,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742232_1408 (size=12301) 2024-12-08T00:21:05,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:05,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:05,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:05,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:05,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:05,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:05,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:05,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:06,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617326069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617326071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617326071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617326073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,159 DEBUG [Thread-1433 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:62287 2024-12-08T00:21:06,159 DEBUG [Thread-1433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:06,159 DEBUG [Thread-1431 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:62287 2024-12-08T00:21:06,159 DEBUG [Thread-1431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:06,160 DEBUG [Thread-1429 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:62287 2024-12-08T00:21:06,160 DEBUG [Thread-1429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:06,161 DEBUG [Thread-1425 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:62287 2024-12-08T00:21:06,161 DEBUG [Thread-1425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:06,161 DEBUG [Thread-1427 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:62287 2024-12-08T00:21:06,161 DEBUG [Thread-1427 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:06,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:06,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:06,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/3c4dc13230dc48e18a78ce4d41e5cc8a 2024-12-08T00:21:06,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8994bcd3e2fc4ee5881397c9f8217fcc is 50, key is test_row_0/C:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:06,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742233_1409 (size=12301) 2024-12-08T00:21:06,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:06,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
as already flushing 2024-12-08T00:21:06,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:06,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37790 deadline: 1733617326573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37772 deadline: 1733617326575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37732 deadline: 1733617326575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37748 deadline: 1733617326575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,660 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:06,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
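The RegionTooBusyException entries above show region d6019a516c33d3d08395be7add424e27 rejecting mutations while its memstore is over the 512.0 K limit and a flush is still in flight. The stock HBase client normally retries this exception internally, so the sketch below is only a hypothetical illustration of an explicit application-side retry against this table; the class name, value payload and backoff constants are invented for the example, while the row, family and qualifier match the test traffic logged above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same row/family/qualifier shape as the writes being rejected above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                     // illustrative starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                       // rejected while the memstore is over its limit
              break;
            } catch (RegionTooBusyException busy) {
              // The region refuses writes until the in-flight flush frees memstore space;
              // back off and retry (the default client already does this transparently).
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }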
2024-12-08T00:21:06,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:06,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8994bcd3e2fc4ee5881397c9f8217fcc 2024-12-08T00:21:06,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/e82f8cc0588d4785843c132501697452 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452 2024-12-08T00:21:06,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452, entries=200, sequenceid=430, filesize=14.4 K 2024-12-08T00:21:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/3c4dc13230dc48e18a78ce4d41e5cc8a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a 2024-12-08T00:21:06,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a, entries=150, sequenceid=430, filesize=12.0 K 2024-12-08T00:21:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8994bcd3e2fc4ee5881397c9f8217fcc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc 2024-12-08T00:21:06,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc, entries=150, sequenceid=430, filesize=12.0 K 2024-12-08T00:21:06,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d6019a516c33d3d08395be7add424e27 in 1254ms, sequenceid=430, compaction requested=true 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d6019a516c33d3d08395be7add424e27:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:06,706 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6019a516c33d3d08395be7add424e27:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:06,706 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40297 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/A is initiating minor compaction (all files) 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/B is initiating minor compaction (all files) 2024-12-08T00:21:06,707 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/A in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:06,707 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/B in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:06,707 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a24df743301645468c1133136ae7a170, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=37.0 K 2024-12-08T00:21:06,707 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/70f87bb59380456abe2f06330edd91e4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=39.4 K 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a24df743301645468c1133136ae7a170, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70f87bb59380456abe2f06330edd91e4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ef257cef6b7c4030a9ee4a87eb8dbffb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617263201 2024-12-08T00:21:06,707 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1c04660e9d542c58eb24844609260f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617263201 2024-12-08T00:21:06,708 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e82f8cc0588d4785843c132501697452, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733617264328 2024-12-08T00:21:06,708 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c4dc13230dc48e18a78ce4d41e5cc8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733617265447 2024-12-08T00:21:06,713 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#A#compaction#350 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:06,714 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/ec5499e9a7d74983a400ff1b6ef4d8c0 is 50, key is test_row_0/A:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:06,714 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:06,715 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ef02bab3276643d9b4dba98d79b23a6e is 50, key is test_row_0/B:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:06,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742234_1410 (size=13357) 2024-12-08T00:21:06,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742235_1411 (size=13357) 2024-12-08T00:21:06,813 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:06,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-08T00:21:06,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
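All of the flush and compaction activity in this run targets the three column families A, B and C of TestAcidGuarantees (the "3/3 column families" flushed below). For orientation, a table with that layout could be created along the following lines; this is a reconstruction under that assumption, not the test tool's actual setup code, and the class name is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // One store per family; these are the A/B/C stores being flushed and compacted above.
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
          }
          admin.createTable(builder.build());
        }
      }
    }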
2024-12-08T00:21:06,814 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:06,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:06,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/999c4a77c72f44f58548d2664bd07d9c is 50, key is test_row_0/A:col10/1733617265461/Put/seqid=0 2024-12-08T00:21:06,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742236_1412 (size=12301) 2024-12-08T00:21:06,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:07,124 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/ec5499e9a7d74983a400ff1b6ef4d8c0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/ec5499e9a7d74983a400ff1b6ef4d8c0 2024-12-08T00:21:07,124 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ef02bab3276643d9b4dba98d79b23a6e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef02bab3276643d9b4dba98d79b23a6e 2024-12-08T00:21:07,128 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/B of d6019a516c33d3d08395be7add424e27 into 
ef02bab3276643d9b4dba98d79b23a6e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:07,128 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/A of d6019a516c33d3d08395be7add424e27 into ec5499e9a7d74983a400ff1b6ef4d8c0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:07,128 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/B, priority=13, startTime=1733617266706; duration=0sec 2024-12-08T00:21:07,128 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/A, priority=13, startTime=1733617266706; duration=0sec 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:B 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:07,128 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:A 2024-12-08T00:21:07,129 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:07,129 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): d6019a516c33d3d08395be7add424e27/C is initiating minor compaction (all files) 2024-12-08T00:21:07,129 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6019a516c33d3d08395be7add424e27/C in TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
2024-12-08T00:21:07,129 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ad3977d3a6d412db69c4b6e3327f84d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp, totalSize=37.0 K 2024-12-08T00:21:07,130 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ad3977d3a6d412db69c4b6e3327f84d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733617263052 2024-12-08T00:21:07,130 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a98a8ac9a0c9440d84930a8903fa39f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617263201 2024-12-08T00:21:07,130 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8994bcd3e2fc4ee5881397c9f8217fcc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733617265447 2024-12-08T00:21:07,137 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6019a516c33d3d08395be7add424e27#C#compaction#353 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:07,137 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/eda2263c94174fde90b553fa9d6a7fb7 is 50, key is test_row_0/C:col10/1733617265447/Put/seqid=0 2024-12-08T00:21:07,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742237_1413 (size=13357) 2024-12-08T00:21:07,222 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/999c4a77c72f44f58548d2664bd07d9c 2024-12-08T00:21:07,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ecec68678be4415a890ed6fb170436ae is 50, key is test_row_0/B:col10/1733617265461/Put/seqid=0 2024-12-08T00:21:07,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742238_1414 (size=12301) 2024-12-08T00:21:07,545 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/eda2263c94174fde90b553fa9d6a7fb7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/eda2263c94174fde90b553fa9d6a7fb7 2024-12-08T00:21:07,548 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6019a516c33d3d08395be7add424e27/C of d6019a516c33d3d08395be7add424e27 into eda2263c94174fde90b553fa9d6a7fb7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:07,548 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:07,548 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27., storeName=d6019a516c33d3d08395be7add424e27/C, priority=13, startTime=1733617266706; duration=0sec 2024-12-08T00:21:07,548 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:07,549 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6019a516c33d3d08395be7add424e27:C 2024-12-08T00:21:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:07,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. as already flushing 2024-12-08T00:21:07,579 DEBUG [Thread-1416 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:62287 2024-12-08T00:21:07,579 DEBUG [Thread-1416 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:07,622 DEBUG [Thread-1418 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:62287 2024-12-08T00:21:07,622 DEBUG [Thread-1422 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:62287 2024-12-08T00:21:07,622 DEBUG [Thread-1420 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:62287 2024-12-08T00:21:07,622 DEBUG [Thread-1418 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:07,622 DEBUG [Thread-1420 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:07,622 DEBUG [Thread-1422 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:07,632 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ecec68678be4415a890ed6fb170436ae 2024-12-08T00:21:07,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8cfc110b0d6747fab0121074697a6d9c is 50, key is test_row_0/C:col10/1733617265461/Put/seqid=0 2024-12-08T00:21:07,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742239_1415 (size=12301) 2024-12-08T00:21:07,705 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-08T00:21:07,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:08,042 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8cfc110b0d6747fab0121074697a6d9c 2024-12-08T00:21:08,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/999c4a77c72f44f58548d2664bd07d9c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/999c4a77c72f44f58548d2664bd07d9c 2024-12-08T00:21:08,049 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/999c4a77c72f44f58548d2664bd07d9c, entries=150, sequenceid=442, filesize=12.0 K 2024-12-08T00:21:08,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/ecec68678be4415a890ed6fb170436ae as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ecec68678be4415a890ed6fb170436ae 2024-12-08T00:21:08,052 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ecec68678be4415a890ed6fb170436ae, entries=150, sequenceid=442, filesize=12.0 K 2024-12-08T00:21:08,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/8cfc110b0d6747fab0121074697a6d9c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8cfc110b0d6747fab0121074697a6d9c 2024-12-08T00:21:08,055 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8cfc110b0d6747fab0121074697a6d9c, entries=150, sequenceid=442, filesize=12.0 K 2024-12-08T00:21:08,056 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] 
regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=26.84 KB/27480 for d6019a516c33d3d08395be7add424e27 in 1242ms, sequenceid=442, compaction requested=false 2024-12-08T00:21:08,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:08,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:08,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-08T00:21:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-08T00:21:08,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-08T00:21:08,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3150 sec 2024-12-08T00:21:08,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.3190 sec 2024-12-08T00:21:08,381 DEBUG [Thread-1414 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:62287 2024-12-08T00:21:08,381 DEBUG [Thread-1414 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:09,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T00:21:09,846 INFO [Thread-1424 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6430
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6466
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6470
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6499
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6476
2024-12-08T00:21:09,847 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-08T00:21:09,847 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-08T00:21:09,847 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:62287
2024-12-08T00:21:09,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T00:21:09,848 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-08T00:21:09,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-08T00:21:09,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-08T00:21:09,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93
2024-12-08T00:21:09,850 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617269850"}]},"ts":"1733617269850"}
2024-12-08T00:21:09,852 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-08T00:21:09,854 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-08T00:21:09,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-08T00:21:09,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, UNASSIGN}]
2024-12-08T00:21:09,856 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, UNASSIGN
2024-12-08T00:21:09,856 INFO [PEWorker-4 {}]
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=d6019a516c33d3d08395be7add424e27, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:09,857 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:21:09,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-08T00:21:10,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:10,008 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing d6019a516c33d3d08395be7add424e27, disabling compactions & flushes 2024-12-08T00:21:10,009 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. after waiting 0 ms 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 
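The records above show the client-visible shape of this sequence: a flush that completed as procId 91, then a disable stored as pid=93, which the master expands into CloseTableRegionsProcedure (pid=94), an UNASSIGN TransitRegionStateProcedure (pid=95) and a CloseRegionProcedure (pid=96). A minimal sketch of the two Admin calls that drive such a sequence follows; this is not the AcidGuaranteesTestTool itself, the class name is hypothetical, and it assumes an hbase-site.xml on the classpath pointing at the test cluster (ZooKeeper at 127.0.0.1:62287 in this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper class, not part of the test harness.
public class FlushThenDisable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush: the master schedules a FlushTableProcedure with one
      // FlushRegionProcedure per region (pid=91/92 in the log above).
      admin.flush(table);
      // Disable: the master schedules a DisableTableProcedure, which unassigns
      // and closes each region (pid=93..96 in the log above and below).
      admin.disableTable(table);
    }
  }
}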
2024-12-08T00:21:10,009 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing d6019a516c33d3d08395be7add424e27 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=A 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=B 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6019a516c33d3d08395be7add424e27, store=C 2024-12-08T00:21:10,009 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:10,012 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/851693d386ed401181082a1fae00ce98 is 50, key is test_row_0/A:col10/1733617268380/Put/seqid=0 2024-12-08T00:21:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742240_1416 (size=9857) 2024-12-08T00:21:10,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-08T00:21:10,416 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/851693d386ed401181082a1fae00ce98 2024-12-08T00:21:10,424 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2c5022d1d12346e09dc04fd0abc22ede is 50, key is test_row_0/B:col10/1733617268380/Put/seqid=0 2024-12-08T00:21:10,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742241_1417 (size=9857) 2024-12-08T00:21:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-08T00:21:10,828 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2c5022d1d12346e09dc04fd0abc22ede 2024-12-08T00:21:10,833 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/77aa139df96849b5a02a88b778fa3f15 is 50, key is test_row_0/C:col10/1733617268380/Put/seqid=0 2024-12-08T00:21:10,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742242_1418 (size=9857) 2024-12-08T00:21:10,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-08T00:21:11,237 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/77aa139df96849b5a02a88b778fa3f15 2024-12-08T00:21:11,241 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/A/851693d386ed401181082a1fae00ce98 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/851693d386ed401181082a1fae00ce98 2024-12-08T00:21:11,244 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/851693d386ed401181082a1fae00ce98, entries=100, sequenceid=453, filesize=9.6 K 2024-12-08T00:21:11,244 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/B/2c5022d1d12346e09dc04fd0abc22ede as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2c5022d1d12346e09dc04fd0abc22ede 2024-12-08T00:21:11,247 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2c5022d1d12346e09dc04fd0abc22ede, entries=100, sequenceid=453, filesize=9.6 K 2024-12-08T00:21:11,248 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/.tmp/C/77aa139df96849b5a02a88b778fa3f15 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/77aa139df96849b5a02a88b778fa3f15 2024-12-08T00:21:11,250 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/77aa139df96849b5a02a88b778fa3f15, entries=100, sequenceid=453, filesize=9.6 K 2024-12-08T00:21:11,251 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d6019a516c33d3d08395be7add424e27 in 1242ms, sequenceid=453, compaction requested=true 2024-12-08T00:21:11,251 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/163ed51cb8ae42efab436b216648e95a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f7b7cb2bc81640f780b7c3172676e2bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/61a52732452d4f43aedc696c86ad98a2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9cda4a7bba8f458d8001edbe66f310e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/5d5a8f36f1c9412eb129776a6568eee3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/26f7ac88ca4a49228861c1744e8c4867, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/503876d1d02146418441816e8ed99867, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a29ef0fd383d424f951d5a642f312055, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/70f87bb59380456abe2f06330edd91e4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452] to archive 2024-12-08T00:21:11,252 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:21:11,254 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/594419844177417ebb9c42cf6019af4a 2024-12-08T00:21:11,255 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/46979a23024f4c18bd751aed22be1f0b 2024-12-08T00:21:11,256 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/163ed51cb8ae42efab436b216648e95a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/163ed51cb8ae42efab436b216648e95a 2024-12-08T00:21:11,256 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d84710f8762946ed9cd1016be69aba06 2024-12-08T00:21:11,257 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/210e34102b734265a52a4a799ac40ab2 2024-12-08T00:21:11,258 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/16c62cbedbe94962b016de25b5b75f16 2024-12-08T00:21:11,259 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f7b7cb2bc81640f780b7c3172676e2bc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f7b7cb2bc81640f780b7c3172676e2bc 2024-12-08T00:21:11,260 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/8c49fbbd6a9c4b65925812afd2913135 2024-12-08T00:21:11,260 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/61a52732452d4f43aedc696c86ad98a2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/61a52732452d4f43aedc696c86ad98a2 2024-12-08T00:21:11,261 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/88d11d6424cc4a4f8c45455a1c5b04a4 2024-12-08T00:21:11,262 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a0cdcb07ff444898bde7b91d39610451 2024-12-08T00:21:11,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/24955d4aab9342ff89917a96bc4fdd9e 2024-12-08T00:21:11,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/3410c0a22dc9430fa037951afe82813c 2024-12-08T00:21:11,264 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9cda4a7bba8f458d8001edbe66f310e5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9cda4a7bba8f458d8001edbe66f310e5 2024-12-08T00:21:11,265 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/d33cccc8338e4a22ba36052af6eace73 2024-12-08T00:21:11,266 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/5d5a8f36f1c9412eb129776a6568eee3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/5d5a8f36f1c9412eb129776a6568eee3 2024-12-08T00:21:11,267 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/2e6fe6e4f169405fb94035d02bc62ecf 2024-12-08T00:21:11,268 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/35da137b21bf4fdda3d656641c6407ac 2024-12-08T00:21:11,269 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/9e8ef21bff594ac0b45c953749f41688 2024-12-08T00:21:11,269 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/26f7ac88ca4a49228861c1744e8c4867 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/26f7ac88ca4a49228861c1744e8c4867 2024-12-08T00:21:11,270 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/929f7de467374ac3ba15636d9865842e 2024-12-08T00:21:11,271 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/503876d1d02146418441816e8ed99867 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/503876d1d02146418441816e8ed99867 2024-12-08T00:21:11,272 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/390a39d6fe814533ac6bd5fa121388c7 2024-12-08T00:21:11,273 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/41239c922584427f8f45e1de478ea399 2024-12-08T00:21:11,274 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a29ef0fd383d424f951d5a642f312055 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a29ef0fd383d424f951d5a642f312055 2024-12-08T00:21:11,275 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e957545916514671b3f602a34c31b850 2024-12-08T00:21:11,275 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/a4040e34da5b49e2b0b0a75da3652218 2024-12-08T00:21:11,276 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/70f87bb59380456abe2f06330edd91e4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/70f87bb59380456abe2f06330edd91e4 2024-12-08T00:21:11,277 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/7bc6a396b4ea47d885d096f352092c38 2024-12-08T00:21:11,278 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/f1c04660e9d542c58eb24844609260f9 2024-12-08T00:21:11,279 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/e82f8cc0588d4785843c132501697452 2024-12-08T00:21:11,280 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/23cbb82d84e949458a1fb789c4c1bd1c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ed9ebab0b30b45949d7a02e42febe516, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/7b3b45f443024ba5b33aac05cf8695c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/aaab20cb8af44755a3fd8955faf26d83, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/8c716ef536544043bd2d9dc813d9f663, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/497ce66d2930495ea1583b49dc8ed53f, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/137e9fe51dd54effb4e5bfb591132734, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/777ae3b34a864ae1bef349b15f522ace, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a24df743301645468c1133136ae7a170, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a] to archive 2024-12-08T00:21:11,281 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
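As the HFileArchiver records around this point show, closing the store does not delete the old store files; each one is moved from the table's data directory into the cluster archive directory with the same table/region/family layout. A one-method sketch of that path rewrite follows; the helper name is hypothetical and this only illustrates the mapping visible in the log, not HBase's actual HFileArchiver code.

// Hypothetical illustration of the data -> archive path mapping seen in these records.
final class ArchivePathSketch {
  static String toArchivePath(String storeFileUri) {
    // hdfs://.../93e70c9c-.../data/default/TestAcidGuarantees/<region>/<family>/<hfile>
    //   becomes
    // hdfs://.../93e70c9c-.../archive/data/default/TestAcidGuarantees/<region>/<family>/<hfile>
    return storeFileUri.replaceFirst("/data/default/", "/archive/data/default/");
  }
}

For example, the B-family file dd70acd3e07245a5980486f9e7b4f9b2 maps to .../archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2, matching the first archive record below.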
2024-12-08T00:21:11,282 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/dd70acd3e07245a5980486f9e7b4f9b2 2024-12-08T00:21:11,283 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/192453ed55f147d3a663fd9bd6a9bdd9 2024-12-08T00:21:11,284 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/23cbb82d84e949458a1fb789c4c1bd1c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/23cbb82d84e949458a1fb789c4c1bd1c 2024-12-08T00:21:11,284 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/83394c744268448ebda34fd64ef04f7d 2024-12-08T00:21:11,285 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/19be09838e5e4ed6b1cece58365c753f 2024-12-08T00:21:11,286 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ed9ebab0b30b45949d7a02e42febe516 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ed9ebab0b30b45949d7a02e42febe516 2024-12-08T00:21:11,287 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/675ff23d4b0e49ec979bbc044921b4a1 2024-12-08T00:21:11,288 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/256df776d68c44339543f8ec41f48d97 2024-12-08T00:21:11,289 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/7b3b45f443024ba5b33aac05cf8695c6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/7b3b45f443024ba5b33aac05cf8695c6 2024-12-08T00:21:11,289 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b934f280da104030a149e950c311abe8 2024-12-08T00:21:11,290 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/b1f61414c39d460f9fe0a139abc27a56 2024-12-08T00:21:11,291 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/66f6069aa95149bca6e6d2a4403cfc2b 2024-12-08T00:21:11,292 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/aaab20cb8af44755a3fd8955faf26d83 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/aaab20cb8af44755a3fd8955faf26d83 2024-12-08T00:21:11,292 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0 2024-12-08T00:21:11,293 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2a4d524e9bd04e98924654841d693e55 2024-12-08T00:21:11,294 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/8c716ef536544043bd2d9dc813d9f663 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/8c716ef536544043bd2d9dc813d9f663 2024-12-08T00:21:11,295 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/c4c1dc4c1dec4c588dba85853cbb1db5 2024-12-08T00:21:11,295 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/250fef7314a14a15b909605a54b77a9f 2024-12-08T00:21:11,296 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/497ce66d2930495ea1583b49dc8ed53f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/497ce66d2930495ea1583b49dc8ed53f 2024-12-08T00:21:11,297 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/6d7d4aadb27d46e89cf7c11032d08413 2024-12-08T00:21:11,298 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/25495306714d4c9bad92e5cf2311c56b 2024-12-08T00:21:11,299 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/137e9fe51dd54effb4e5bfb591132734 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/137e9fe51dd54effb4e5bfb591132734 2024-12-08T00:21:11,299 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a7956b0a92764ad3a29d5450635a5d82 2024-12-08T00:21:11,300 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/e516bd22239840c38b5aab36c6d989ab 2024-12-08T00:21:11,301 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/777ae3b34a864ae1bef349b15f522ace to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/777ae3b34a864ae1bef349b15f522ace 2024-12-08T00:21:11,302 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/f9a0189307944c47a5908ec8f75d0f4f 2024-12-08T00:21:11,302 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/bea0f72953d64f0ea4b498af7949299c 2024-12-08T00:21:11,303 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a24df743301645468c1133136ae7a170 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/a24df743301645468c1133136ae7a170 2024-12-08T00:21:11,304 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ae0b843ed7964e7790463589d6c42df6 2024-12-08T00:21:11,305 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef257cef6b7c4030a9ee4a87eb8dbffb 2024-12-08T00:21:11,306 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/3c4dc13230dc48e18a78ce4d41e5cc8a 2024-12-08T00:21:11,307 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e5ead05e71a743d18504b19a0b3e90be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0926b32f73cc436ab0c0c04345bb95be, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/08d0026358a14885a2f7eae04087dda3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99983bc1d2e94ba68b2d54c8fa63cf34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/930885ef78594cf29cec14510ce11680, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/7974958caad34e5e8fc75534541d0a8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/22a56dea99c149b6bb12121413a36dc0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/70c28185d1be46b1b4ffc446c1451b74, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ad3977d3a6d412db69c4b6e3327f84d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc] to archive 2024-12-08T00:21:11,308 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
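[Editor's note] The entries above and below show backup.HFileArchiver relocating each compacted store file from the region's data/ tree into a mirrored path under archive/ rather than deleting it. The following is a minimal sketch of that move using the plain Hadoop FileSystem API, with the source and destination assembled from one of the B-family entries in this log; it is an illustration only and omits the retry, existence-check, and timestamp-suffix handling the real HFileArchiver performs.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46183"); // namenode address used throughout this log

    String base = "/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3";
    String relative = "/data/default/TestAcidGuarantees/"
        + "d6019a516c33d3d08395be7add424e27/B/f2ecd9fbea2f41c7b9bc496ce7f679e0";

    Path src = new Path(base + relative);              // compacted store file still under data/
    Path dst = new Path(base + "/archive" + relative); // same table/region/family layout under archive/

    try (FileSystem fs = FileSystem.get(conf)) {
      fs.mkdirs(dst.getParent());                      // make sure the archive family directory exists
      if (!fs.rename(src, dst)) {                      // HDFS rename is a metadata-only move
        throw new java.io.IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }
}
```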
2024-12-08T00:21:11,309 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/df06d91414074b13aba91a24a0bc123f 2024-12-08T00:21:11,310 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/692471317dd1457e9b9f3c832b8b5de9 2024-12-08T00:21:11,310 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e5ead05e71a743d18504b19a0b3e90be to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e5ead05e71a743d18504b19a0b3e90be 2024-12-08T00:21:11,311 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/d7f497f9bf9f4ae6a2674c0f8e2bfb69 2024-12-08T00:21:11,312 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/49fee7a8e6574650b3abae8df284b8fa 2024-12-08T00:21:11,313 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0926b32f73cc436ab0c0c04345bb95be to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0926b32f73cc436ab0c0c04345bb95be 2024-12-08T00:21:11,314 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ff1e43af1a640a38b10235a7fe81a15 2024-12-08T00:21:11,315 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/83843c4e378e4f2db19a999d89204adf 2024-12-08T00:21:11,316 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/08d0026358a14885a2f7eae04087dda3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/08d0026358a14885a2f7eae04087dda3 2024-12-08T00:21:11,317 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/0bcabd8cb100402cbd93959420ccb52c 2024-12-08T00:21:11,318 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/b421747bc48d46db94be9d09c8db7b9f 2024-12-08T00:21:11,318 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/588f8663d6334128976e3a8b2f79083f 2024-12-08T00:21:11,319 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99983bc1d2e94ba68b2d54c8fa63cf34 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99983bc1d2e94ba68b2d54c8fa63cf34 2024-12-08T00:21:11,320 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/5f57061a4a3f4c2bb6152dea58552a7b 2024-12-08T00:21:11,321 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/95e8ad8f667642559819d616434f27c2 2024-12-08T00:21:11,322 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/930885ef78594cf29cec14510ce11680 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/930885ef78594cf29cec14510ce11680 2024-12-08T00:21:11,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/f3f55c5a214f44548bd1428ace286a41 2024-12-08T00:21:11,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/782bd7a5999f4f9f893931ae9b6752e3 2024-12-08T00:21:11,324 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/7974958caad34e5e8fc75534541d0a8e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/7974958caad34e5e8fc75534541d0a8e 2024-12-08T00:21:11,325 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/e1a081aa9d1e4da9bacc9a3b6aacdd91 2024-12-08T00:21:11,326 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8b50c3b854544d2e88f33c39c7b032ad 2024-12-08T00:21:11,327 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/22a56dea99c149b6bb12121413a36dc0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/22a56dea99c149b6bb12121413a36dc0 2024-12-08T00:21:11,327 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/6249f95c514044b6baeceade7bc2eb88 2024-12-08T00:21:11,328 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/bed8721c6fc247d090a8ed4dd66517f3 2024-12-08T00:21:11,329 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/70c28185d1be46b1b4ffc446c1451b74 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/70c28185d1be46b1b4ffc446c1451b74 2024-12-08T00:21:11,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/99cebb88f8b2474789359b78f51fdac4 2024-12-08T00:21:11,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/52cc7de5802040a9b38a40290ca031ad 2024-12-08T00:21:11,331 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ad3977d3a6d412db69c4b6e3327f84d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/2ad3977d3a6d412db69c4b6e3327f84d 2024-12-08T00:21:11,332 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/ccf9db63f09049e48e4af3d6cb071143 2024-12-08T00:21:11,333 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/a98a8ac9a0c9440d84930a8903fa39f9 2024-12-08T00:21:11,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8994bcd3e2fc4ee5881397c9f8217fcc 2024-12-08T00:21:11,337 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/recovered.edits/456.seqid, newMaxSeqId=456, maxSeqId=1 2024-12-08T00:21:11,338 INFO 
[RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27. 2024-12-08T00:21:11,338 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for d6019a516c33d3d08395be7add424e27: 2024-12-08T00:21:11,339 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:11,340 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=d6019a516c33d3d08395be7add424e27, regionState=CLOSED 2024-12-08T00:21:11,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-08T00:21:11,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure d6019a516c33d3d08395be7add424e27, server=017dd09fb407,36703,1733617179335 in 1.4830 sec 2024-12-08T00:21:11,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-08T00:21:11,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6019a516c33d3d08395be7add424e27, UNASSIGN in 1.4860 sec 2024-12-08T00:21:11,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-08T00:21:11,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4890 sec 2024-12-08T00:21:11,345 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617271344"}]},"ts":"1733617271344"} 2024-12-08T00:21:11,345 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:21:11,347 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:21:11,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5000 sec 2024-12-08T00:21:11,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-08T00:21:11,955 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-08T00:21:11,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T00:21:11,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,956 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-08T00:21:11,958 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:11,960 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/recovered.edits] 2024-12-08T00:21:11,962 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/851693d386ed401181082a1fae00ce98 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/851693d386ed401181082a1fae00ce98 2024-12-08T00:21:11,963 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/999c4a77c72f44f58548d2664bd07d9c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/999c4a77c72f44f58548d2664bd07d9c 2024-12-08T00:21:11,964 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/ec5499e9a7d74983a400ff1b6ef4d8c0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/A/ec5499e9a7d74983a400ff1b6ef4d8c0 2024-12-08T00:21:11,966 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2c5022d1d12346e09dc04fd0abc22ede to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/2c5022d1d12346e09dc04fd0abc22ede 2024-12-08T00:21:11,967 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ecec68678be4415a890ed6fb170436ae to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ecec68678be4415a890ed6fb170436ae 2024-12-08T00:21:11,968 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef02bab3276643d9b4dba98d79b23a6e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/B/ef02bab3276643d9b4dba98d79b23a6e 2024-12-08T00:21:11,970 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/77aa139df96849b5a02a88b778fa3f15 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/77aa139df96849b5a02a88b778fa3f15 2024-12-08T00:21:11,971 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8cfc110b0d6747fab0121074697a6d9c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/8cfc110b0d6747fab0121074697a6d9c 2024-12-08T00:21:11,972 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/eda2263c94174fde90b553fa9d6a7fb7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/C/eda2263c94174fde90b553fa9d6a7fb7 2024-12-08T00:21:11,974 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/recovered.edits/456.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27/recovered.edits/456.seqid 2024-12-08T00:21:11,974 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/d6019a516c33d3d08395be7add424e27 2024-12-08T00:21:11,974 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:21:11,976 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,978 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:21:11,980 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
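[Editor's note] The surrounding entries record the client-driven teardown: the DISABLE of TestAcidGuarantees completed as procId 93, the delete request was stored as pid=97, and DeleteTableProcedure then archived the remaining region files and removed the table's rows from hbase:meta. Below is a hedged sketch of issuing the same two operations through the standard HBase 2.x client API; the ZooKeeper settings mirror the 127.0.0.1:62287 connect string that appears later in this log, and everything else is an assumption for illustration rather than the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // assumed local ZK, as in this test run
    conf.setInt("hbase.zookeeper.property.clientPort", 62287); // port from the ZK connect string in the log

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // master runs DisableTableProcedure; the client polls until done
      }
      admin.deleteTable(table);    // DeleteTableProcedure archives region dirs and cleans hbase:meta
    }
  }
}
```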
2024-12-08T00:21:11,981 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,981 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:21:11,981 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617271981"}]},"ts":"9223372036854775807"} 2024-12-08T00:21:11,983 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:21:11,983 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d6019a516c33d3d08395be7add424e27, NAME => 'TestAcidGuarantees,,1733617243996.d6019a516c33d3d08395be7add424e27.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:21:11,983 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-08T00:21:11,983 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617271983"}]},"ts":"9223372036854775807"} 2024-12-08T00:21:11,984 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:21:11,986 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:11,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 31 msec 2024-12-08T00:21:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-08T00:21:12,058 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-12-08T00:21:12,067 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241 (was 241), OpenFileDescriptor=461 (was 459) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=412 (was 474), ProcessCount=11 (was 11), AvailableMemoryMB=7610 (was 7693) 2024-12-08T00:21:12,075 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=412, ProcessCount=11, AvailableMemoryMB=7610 2024-12-08T00:21:12,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-08T00:21:12,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:21:12,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:12,079 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:21:12,079 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:12,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-12-08T00:21:12,079 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:21:12,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:12,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742243_1419 (size=963) 2024-12-08T00:21:12,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:12,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:12,488 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:21:12,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742244_1420 (size=53) 2024-12-08T00:21:12,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 46a39620848480c2f6f28f4fa1ea64a8, disabling compactions & flushes 2024-12-08T00:21:12,894 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. after waiting 0 ms 2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:12,894 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
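[Editor's note] The create request logged above sets 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' as table metadata and defines three identically configured families A, B, and C with VERSIONS => '1', while the earlier TableDescriptorChecker warning shows the descriptor also carries a deliberately small 131072-byte (128 KB) memstore flush size. The sketch below is a best-effort reconstruction of an equivalent descriptor with the HBase 2.x client API, not the test's actual code; the per-family setInMemoryCompaction call is the typed counterpart of the table-level metadata key shown in the log.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveTableDescriptorSketch {
  static TableDescriptor adaptiveDescriptor() {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the log: ADAPTIVE in-memory compaction for all stores.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        // 131072 bytes = 128 KB, the value the TableDescriptorChecker warning flags as too small.
        .setMemStoreFlushSize(131072L);

    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                                      // VERSIONS => '1' in the log
          .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE) // typed equivalent of the metadata key
          .build());
    }
    return builder.build();
  }

  // Usage with an Admin obtained as in the previous sketch: admin.createTable(adaptiveDescriptor());
}
```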
2024-12-08T00:21:12,894 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:12,895 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:21:12,895 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617272895"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617272895"}]},"ts":"1733617272895"} 2024-12-08T00:21:12,896 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:21:12,896 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:21:12,897 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617272896"}]},"ts":"1733617272896"} 2024-12-08T00:21:12,897 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:21:12,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, ASSIGN}] 2024-12-08T00:21:12,902 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, ASSIGN 2024-12-08T00:21:12,902 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:21:13,053 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:13,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:13,205 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:13,208 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:13,208 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:21:13,208 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,208 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:21:13,208 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,208 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,209 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,210 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:13,211 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName A 2024-12-08T00:21:13,211 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:13,211 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:13,211 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,212 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:13,212 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName B 2024-12-08T00:21:13,212 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:13,212 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:13,213 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,213 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:13,213 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName C 2024-12-08T00:21:13,213 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:13,214 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:13,214 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:13,214 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,215 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,216 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:21:13,216 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:13,218 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:21:13,218 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 46a39620848480c2f6f28f4fa1ea64a8; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72207822, jitterRate=0.07598039507865906}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:21:13,219 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:13,220 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., pid=100, masterSystemTime=1733617273205 2024-12-08T00:21:13,221 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:13,221 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:13,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:13,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-08T00:21:13,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 in 168 msec 2024-12-08T00:21:13,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-08T00:21:13,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, ASSIGN in 322 msec 2024-12-08T00:21:13,225 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:21:13,225 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617273225"}]},"ts":"1733617273225"} 2024-12-08T00:21:13,226 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:21:13,228 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:21:13,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-08T00:21:14,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-08T00:21:14,184 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-08T00:21:14,185 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-12-08T00:21:14,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:14,189 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:14,191 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:14,191 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:21:14,192 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58486, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:21:14,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T00:21:14,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:21:14,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:14,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742245_1421 (size=999) 2024-12-08T00:21:14,604 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T00:21:14,604 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T00:21:14,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:21:14,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, REOPEN/MOVE}] 2024-12-08T00:21:14,608 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, REOPEN/MOVE 2024-12-08T00:21:14,609 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:14,610 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:21:14,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:14,761 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:14,762 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:14,762 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:21:14,762 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 46a39620848480c2f6f28f4fa1ea64a8, disabling compactions & flushes 2024-12-08T00:21:14,762 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:14,762 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:14,762 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. after waiting 0 ms 2024-12-08T00:21:14,762 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:14,765 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T00:21:14,766 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:14,766 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:14,766 WARN [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 46a39620848480c2f6f28f4fa1ea64a8 to self. 2024-12-08T00:21:14,767 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:14,768 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=CLOSED 2024-12-08T00:21:14,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-08T00:21:14,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 in 158 msec 2024-12-08T00:21:14,770 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, REOPEN/MOVE; state=CLOSED, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=true 2024-12-08T00:21:14,920 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:14,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:15,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,076 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:15,076 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:21:15,076 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,076 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:21:15,077 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,077 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,078 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,079 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:15,079 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName A 2024-12-08T00:21:15,080 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:15,080 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:15,081 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,081 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:15,081 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName B 2024-12-08T00:21:15,081 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:15,082 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:15,082 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,082 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:15,082 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46a39620848480c2f6f28f4fa1ea64a8 columnFamilyName C 2024-12-08T00:21:15,082 DEBUG [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:15,083 INFO [StoreOpener-46a39620848480c2f6f28f4fa1ea64a8-1 {}] regionserver.HStore(327): Store=46a39620848480c2f6f28f4fa1ea64a8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:15,083 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,084 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,084 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,086 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:21:15,087 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,087 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 46a39620848480c2f6f28f4fa1ea64a8; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70309347, jitterRate=0.04769091308116913}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:21:15,088 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:15,088 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., pid=105, masterSystemTime=1733617275073 2024-12-08T00:21:15,090 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,090 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:15,090 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=OPEN, openSeqNum=5, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-12-08T00:21:15,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 in 170 msec 2024-12-08T00:21:15,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-08T00:21:15,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, REOPEN/MOVE in 485 msec 2024-12-08T00:21:15,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-08T00:21:15,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 488 msec 2024-12-08T00:21:15,096 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-12-08T00:21:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-08T00:21:15,098 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3637e4c6 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51f7d511 2024-12-08T00:21:15,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b14fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-12-08T00:21:15,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,109 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-12-08T00:21:15,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,112 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x09f472e0 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-12-08T00:21:15,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,118 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-12-08T00:21:15,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-12-08T00:21:15,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,125 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-12-08T00:21:15,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-12-08T00:21:15,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,130 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-12-08T00:21:15,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,133 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-12-08T00:21:15,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:15,139 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-08T00:21:15,140 DEBUG [hconnection-0x2e3232fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,142 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,142 DEBUG [hconnection-0x5c3e5595-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T00:21:15,142 DEBUG [hconnection-0x5ec250e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,142 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:15,143 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:15,143 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:15,143 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,148 DEBUG [hconnection-0x72809a64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,149 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51312, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:15,152 DEBUG [hconnection-0x790aec38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,152 DEBUG [hconnection-0x5b80da05-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,153 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,153 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,156 DEBUG [hconnection-0x4957108f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,157 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,158 DEBUG [hconnection-0x1f0a4796-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,159 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,160 DEBUG [hconnection-0x25930fb8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,160 DEBUG [hconnection-0x40af2447-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:15,161 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,162 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617335162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617335162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617335163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617335164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617335168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bc7f42ce9c6b4d33908a7a612f87169e_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617275148/Put/seqid=0 2024-12-08T00:21:15,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742246_1422 (size=12154) 2024-12-08T00:21:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T00:21:15,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617335264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617335264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617335264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617335265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617335269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,294 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:15,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:15,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T00:21:15,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:15,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:15,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617335468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617335468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617335468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617335469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617335472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,592 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:15,596 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bc7f42ce9c6b4d33908a7a612f87169e_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc7f42ce9c6b4d33908a7a612f87169e_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:15,597 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/432fd7fceb644da5bcf136c54d6ab92a, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:15,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/432fd7fceb644da5bcf136c54d6ab92a is 175, key is test_row_0/A:col10/1733617275148/Put/seqid=0 2024-12-08T00:21:15,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:15,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:15,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742247_1423 (size=30955) 2024-12-08T00:21:15,604 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/432fd7fceb644da5bcf136c54d6ab92a 2024-12-08T00:21:15,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/d9b9fb42a3744aec8d27da0d042cd1a5 is 50, key is test_row_0/B:col10/1733617275148/Put/seqid=0 2024-12-08T00:21:15,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742248_1424 (size=12001) 2024-12-08T00:21:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T00:21:15,753 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617335772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617335775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617335776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617335776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:15,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617335776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,906 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:15,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:15,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:15,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:15,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:15,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:15,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:16,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/d9b9fb42a3744aec8d27da0d042cd1a5 2024-12-08T00:21:16,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:16,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:16,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
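The pid=107 failures above are the master re-dispatching its FlushRegionProcedure while the regionserver still reports the region as already flushing ("NOT flushing ... as already flushing"), so each attempt ends in the same "Unable to complete flush" IOException until the in-progress flush finishes. For reference, a table flush of this kind can be requested through the HBase Admin client; the sketch below is illustrative only — the configuration and the exact way the test triggers the flush are assumptions, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Synchronous table flush; in this build it shows up on the master as a
          // FlushTableProcedure (pid=106 here) with FlushRegionProcedure children (pid=107).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }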
2024-12-08T00:21:16,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/261732359f6842bbb6a3b7ea60dcfe59 is 50, key is test_row_0/C:col10/1733617275148/Put/seqid=0 2024-12-08T00:21:16,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742249_1425 (size=12001) 2024-12-08T00:21:16,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/261732359f6842bbb6a3b7ea60dcfe59 2024-12-08T00:21:16,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/432fd7fceb644da5bcf136c54d6ab92a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a 2024-12-08T00:21:16,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a, entries=150, sequenceid=17, filesize=30.2 K 2024-12-08T00:21:16,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/d9b9fb42a3744aec8d27da0d042cd1a5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5 2024-12-08T00:21:16,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5, entries=150, sequenceid=17, filesize=11.7 K 2024-12-08T00:21:16,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/261732359f6842bbb6a3b7ea60dcfe59 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59 2024-12-08T00:21:16,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59, entries=150, sequenceid=17, filesize=11.7 K 2024-12-08T00:21:16,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 46a39620848480c2f6f28f4fa1ea64a8 in 944ms, sequenceid=17, compaction 
requested=false 2024-12-08T00:21:16,093 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-08T00:21:16,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:16,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:16,212 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089c1425fee5e74608a1f0b635e852fdc6_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617275160/Put/seqid=0 2024-12-08T00:21:16,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742250_1426 (size=12154) 2024-12-08T00:21:16,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:16,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 
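The store=A flush above writes its cell data under mobdir/.tmp (the d41d8cd98f00b204e9800998ecf8427e... file) before committing a regular store file, because family A is evidently MOB-enabled in this run, while B and C go through the plain DefaultStoreFlusher. For reference, MOB is switched on per column family via the descriptor builders; a minimal sketch follows (only family A shown, and the threshold value is an assumption, not read from the test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSetup {
      static void createMobTable(Admin admin) throws Exception {
        // Cells in family "A" larger than the threshold are stored as MOB files,
        // which is why the flush above writes into /mobdir/.tmp before committing.
        ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(4L)   // bytes; illustrative value only
            .build();
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(familyA)
            .build());
      }
    }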
2024-12-08T00:21:16,247 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089c1425fee5e74608a1f0b635e852fdc6_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089c1425fee5e74608a1f0b635e852fdc6_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:16,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/c0795185bb464c779a85e71ca0408af8, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:16,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/c0795185bb464c779a85e71ca0408af8 is 175, key is test_row_0/A:col10/1733617275160/Put/seqid=0 2024-12-08T00:21:16,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742251_1427 (size=30955) 2024-12-08T00:21:16,252 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/c0795185bb464c779a85e71ca0408af8 2024-12-08T00:21:16,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/f00285bc3bea43cf8e091e46b36c3625 is 50, key is test_row_0/B:col10/1733617275160/Put/seqid=0 2024-12-08T00:21:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742252_1428 (size=12001) 2024-12-08T00:21:16,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:16,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:16,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617336286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617336287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617336288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617336289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617336289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617336391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617336391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617336394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617336394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617336395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617336592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617336593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617336599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617336601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617336601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,671 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/f00285bc3bea43cf8e091e46b36c3625 2024-12-08T00:21:16,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/10052a952488465f807ca27c14d53f8e is 50, key is test_row_0/C:col10/1733617275160/Put/seqid=0 2024-12-08T00:21:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742253_1429 (size=12001) 2024-12-08T00:21:16,683 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/10052a952488465f807ca27c14d53f8e 2024-12-08T00:21:16,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/c0795185bb464c779a85e71ca0408af8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8 2024-12-08T00:21:16,692 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8, entries=150, sequenceid=42, filesize=30.2 K 2024-12-08T00:21:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/f00285bc3bea43cf8e091e46b36c3625 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625 2024-12-08T00:21:16,696 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T00:21:16,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/10052a952488465f807ca27c14d53f8e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e 2024-12-08T00:21:16,700 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T00:21:16,701 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 46a39620848480c2f6f28f4fa1ea64a8 in 489ms, sequenceid=42, compaction requested=false 2024-12-08T00:21:16,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:16,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
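The RegionTooBusyException warnings interleaved above are thrown by HRegion.checkResources when the region's memstore is over the 512.0 K blocking limit while clients keep sending Mutate calls, and they keep recurring while writers outpace the flushes. A writer sees these as retried or failed puts (with the stock client the exception may arrive wrapped by its own retry layer); the following is a minimal explicit retry-with-backoff sketch, with the table, family and row names taken from the log and the retry policy an assumption:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPut {
      // Retries a single put with exponential backoff while the region reports
      // it is too busy; the stock HBase client already retries internally, this
      // only makes the backoff explicit for illustration.
      static void putWithBackoff(Connection connection) throws Exception {
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              // Memstore above the blocking limit; give the flush time to drain it.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
          throw new RuntimeException("region still too busy after retries");
        }
      }
    }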
2024-12-08T00:21:16,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-08T00:21:16,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-08T00:21:16,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-08T00:21:16,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5590 sec 2024-12-08T00:21:16,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.5650 sec 2024-12-08T00:21:16,862 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:21:16,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:16,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:16,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208316357cdf25d4e4f9b8ce3ca6b98cc69_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:16,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742254_1430 (size=14594) 2024-12-08T00:21:16,917 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:16,922 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208316357cdf25d4e4f9b8ce3ca6b98cc69_46a39620848480c2f6f28f4fa1ea64a8 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208316357cdf25d4e4f9b8ce3ca6b98cc69_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:16,922 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b2f525cde39d4b91977b76a885fcda22, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:16,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b2f525cde39d4b91977b76a885fcda22 is 175, key is test_row_0/A:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:16,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742255_1431 (size=39549) 2024-12-08T00:21:16,927 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b2f525cde39d4b91977b76a885fcda22 2024-12-08T00:21:16,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/b1b743ce0c7440ac9ce3c3f5bfefb50c is 50, key is test_row_0/B:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:16,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742256_1432 (size=12001) 2024-12-08T00:21:16,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/b1b743ce0c7440ac9ce3c3f5bfefb50c 2024-12-08T00:21:16,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/7e7e42ed3632403a8b7223153ef72e3e is 50, key is test_row_0/C:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:16,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742257_1433 (size=12001) 2024-12-08T00:21:16,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617336933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617336933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617336986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617336986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:16,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617336986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617337088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617337088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617337096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617337096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617337096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T00:21:17,246 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-08T00:21:17,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:17,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-08T00:21:17,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T00:21:17,249 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:17,249 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:17,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:17,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617337293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617337293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617337300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617337300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617337300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T00:21:17,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/7e7e42ed3632403a8b7223153ef72e3e 2024-12-08T00:21:17,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b2f525cde39d4b91977b76a885fcda22 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22 2024-12-08T00:21:17,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22, entries=200, sequenceid=54, filesize=38.6 K 2024-12-08T00:21:17,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/b1b743ce0c7440ac9ce3c3f5bfefb50c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c 2024-12-08T00:21:17,381 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T00:21:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/7e7e42ed3632403a8b7223153ef72e3e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e 2024-12-08T00:21:17,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T00:21:17,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 46a39620848480c2f6f28f4fa1ea64a8 in 485ms, sequenceid=54, compaction requested=true 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:17,386 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:17,386 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:17,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:17,387 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:17,387 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:17,387 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:17,387 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:17,387 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:17,387 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:17,387 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=99.1 K 2024-12-08T00:21:17,387 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:17,387 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.2 K 2024-12-08T00:21:17,387 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22] 2024-12-08T00:21:17,388 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 432fd7fceb644da5bcf136c54d6ab92a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733617275147 2024-12-08T00:21:17,388 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d9b9fb42a3744aec8d27da0d042cd1a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733617275147 2024-12-08T00:21:17,388 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0795185bb464c779a85e71ca0408af8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617275160 2024-12-08T00:21:17,388 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f00285bc3bea43cf8e091e46b36c3625, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617275160 2024-12-08T00:21:17,389 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b1b743ce0c7440ac9ce3c3f5bfefb50c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276288 2024-12-08T00:21:17,389 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2f525cde39d4b91977b76a885fcda22, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276287 2024-12-08T00:21:17,395 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:17,396 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/46e9a37b699140719030fa899d62d1c5 is 50, key is test_row_0/B:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:17,396 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,399 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412082c6939ebe56c4984acf598f83fbe3dfa_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742258_1434 (size=12104) 2024-12-08T00:21:17,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,401 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412082c6939ebe56c4984acf598f83fbe3dfa_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,402 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082c6939ebe56c4984acf598f83fbe3dfa_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T00:21:17,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
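
[Editor's note] The repeated RegionTooBusyException entries earlier in this run come from HRegion.checkResources rejecting puts while the region's blocking memstore limit (512.0 K here) is exceeded and the flush has not yet drained it. As a rough illustration only, and not part of the test code, a writer could back off and retry such rejections. The table name, family, row, and backoff values below are assumptions, and in practice the HBase client's own retry logic may absorb the exception before it ever reaches application code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The region server refused the write; wait for the flush to shrink the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff, values chosen arbitrarily
        }
      }
    }
  }
}
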
2024-12-08T00:21:17,403 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:21:17,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,409 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/46e9a37b699140719030fa899d62d1c5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/46e9a37b699140719030fa899d62d1c5 2024-12-08T00:21:17,414 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 46e9a37b699140719030fa899d62d1c5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
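
[Editor's note] The FLUSH procedures recorded above (pid=106 completed, pid=108/109 in flight) were requested by the test client through the master ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and the store-B compaction that just completed was queued by the flusher afterwards. A minimal, hypothetical client sketch that issues the same kinds of requests through the Admin API; the cluster configuration and table name are taken from this log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);    // enqueues a flush-table procedure like pid=108 above
      admin.compact(table);  // requests a (minor) compaction of the table's stores
    }
  }
}
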
2024-12-08T00:21:17,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:17,414 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617277386; duration=0sec 2024-12-08T00:21:17,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:17,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:17,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:17,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742259_1435 (size=4469) 2024-12-08T00:21:17,416 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:17,416 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:17,416 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
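
[Editor's note] The SortedCompactionPolicy lines here report "3 store files, 0 compacting, 3 eligible, 16 blocking", and the earlier RegionTooBusyException entries report a 512.0 K blocking memstore limit. Those figures are governed by a handful of standard configuration keys; the sketch below shows where they would be set programmatically. The specific values are assumptions chosen to reproduce the numbers seen in this log (128 KB x 4 = 512 KB), not necessarily the values this test actually configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush threshold per region; the blocking limit is this value times the block
    // multiplier, so 128 KB * 4 would give the 512 KB limit reported above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Compaction selection: three eligible files trigger a minor compaction, and the
    // "16 blocking" figure corresponds to the blocking store-file count.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
  }
}
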
2024-12-08T00:21:17,416 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.2 K 2024-12-08T00:21:17,416 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 261732359f6842bbb6a3b7ea60dcfe59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733617275147 2024-12-08T00:21:17,417 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 10052a952488465f807ca27c14d53f8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617275160 2024-12-08T00:21:17,417 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e7e42ed3632403a8b7223153ef72e3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276288 2024-12-08T00:21:17,418 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#369 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:17,419 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/5d0f2c340dc34b6b93dd355eae93947f is 175, key is test_row_0/A:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:17,438 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#370 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:17,439 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e7031ba6eb49401698271504ffe1960e is 50, key is test_row_0/C:col10/1733617276900/Put/seqid=0 2024-12-08T00:21:17,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087c6c49c56b6d4de0a95c8d0fbc7064d9_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617276933/Put/seqid=0 2024-12-08T00:21:17,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742260_1436 (size=31058) 2024-12-08T00:21:17,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742261_1437 (size=12154) 2024-12-08T00:21:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:17,466 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087c6c49c56b6d4de0a95c8d0fbc7064d9_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087c6c49c56b6d4de0a95c8d0fbc7064d9_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:17,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/f4ddf32f53ce460b8361ec3739351c2b, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/f4ddf32f53ce460b8361ec3739351c2b is 175, key is test_row_0/A:col10/1733617276933/Put/seqid=0 2024-12-08T00:21:17,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742262_1438 (size=12104) 2024-12-08T00:21:17,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742263_1439 (size=30955) 2024-12-08T00:21:17,477 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is 
flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/f4ddf32f53ce460b8361ec3739351c2b 2024-12-08T00:21:17,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/c1ed2aa072d341c6a9a35b922aecad3e is 50, key is test_row_0/B:col10/1733617276933/Put/seqid=0 2024-12-08T00:21:17,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742264_1440 (size=12001) 2024-12-08T00:21:17,489 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/c1ed2aa072d341c6a9a35b922aecad3e 2024-12-08T00:21:17,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/a3f21c7339cb4cc4a7dceeacbd746d2e is 50, key is test_row_0/C:col10/1733617276933/Put/seqid=0 2024-12-08T00:21:17,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742265_1441 (size=12001) 2024-12-08T00:21:17,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T00:21:17,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:17,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617337610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617337611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617337616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617337617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617337618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617337718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617337719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617337719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617337726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617337727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T00:21:17,852 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/5d0f2c340dc34b6b93dd355eae93947f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f 2024-12-08T00:21:17,856 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into 5d0f2c340dc34b6b93dd355eae93947f(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
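The repeated RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore grows past its blocking limit (512.0 K in this run, i.e. the configured flush size times hbase.hregion.memstore.block.multiplier), new mutations are rejected until the in-flight flush drains the memstore, and the rejection reaches clients as a retryable IOException. A minimal, illustrative sketch of a writer that backs off and retries is shown below; the table, row, and column names are taken from the log, while the retry loop and backoff values are assumptions for illustration, not code from TestAcidGuarantees (the HBase client also performs its own internal retries before surfacing the error).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();     // assumes hbase-site.xml points at the cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                                // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                                  // rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // The busy-region error may arrive directly as RegionTooBusyException or wrapped by the
          // client's own retry machinery once its retries are exhausted; back off and try again.
          if (attempt == 4) {
            throw e;                                       // give up after the last attempt
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}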
2024-12-08T00:21:17,856 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:17,856 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617277386; duration=0sec 2024-12-08T00:21:17,856 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:17,856 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:17,875 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e7031ba6eb49401698271504ffe1960e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e7031ba6eb49401698271504ffe1960e 2024-12-08T00:21:17,880 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into e7031ba6eb49401698271504ffe1960e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:17,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:17,880 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617277386; duration=0sec 2024-12-08T00:21:17,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:17,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:17,900 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/a3f21c7339cb4cc4a7dceeacbd746d2e 2024-12-08T00:21:17,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/f4ddf32f53ce460b8361ec3739351c2b as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b 2024-12-08T00:21:17,909 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b, entries=150, sequenceid=78, filesize=30.2 K 2024-12-08T00:21:17,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/c1ed2aa072d341c6a9a35b922aecad3e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e 2024-12-08T00:21:17,913 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:21:17,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/a3f21c7339cb4cc4a7dceeacbd746d2e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e 2024-12-08T00:21:17,917 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:21:17,918 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 46a39620848480c2f6f28f4fa1ea64a8 in 515ms, sequenceid=78, compaction requested=false 2024-12-08T00:21:17,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:17,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
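The pid=108/pid=109 entries around this flush are the master-side FlushTableProcedure and its per-region FlushRegionProcedure subprocedure, and the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" lines further down show the request arriving over the Admin API. A minimal sketch of how such a table flush is typically requested from client code (assuming a reachable cluster configuration; this is not the test's own code) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // assumes hbase-site.xml points at the cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master schedules a
      // FlushTableProcedure with one FlushRegionProcedure per region, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

When a region is already flushing, the per-region subprocedure cannot run immediately, which appears to be what the later pid=111 "NOT flushing ... as already flushing" and "Unable to complete flush" entries reflect.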
2024-12-08T00:21:17,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-08T00:21:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-08T00:21:17,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-08T00:21:17,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 671 msec 2024-12-08T00:21:17,923 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 675 msec 2024-12-08T00:21:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:17,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:17,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:17,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208666bb72144f945b6ad1ea9c8d4622a37_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:17,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742266_1442 (size=14594) 2024-12-08T00:21:17,940 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:17,944 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208666bb72144f945b6ad1ea9c8d4622a37_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208666bb72144f945b6ad1ea9c8d4622a37_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:17,945 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/72337da115a24f76881634791b7b8751, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:17,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/72337da115a24f76881634791b7b8751 is 175, key is test_row_0/A:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742267_1443 (size=39549) 2024-12-08T00:21:17,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617337957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617337960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617337961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617337963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:17,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617337964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617338065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617338068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617338068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617338069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617338070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617338273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617338273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617338274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617338274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617338275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/72337da115a24f76881634791b7b8751 2024-12-08T00:21:18,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T00:21:18,352 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-08T00:21:18,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:18,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-08T00:21:18,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:18,356 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:18,357 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:18,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:18,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/da5cd855884a495dadc2e07938549328 is 50, key is test_row_0/B:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:18,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742268_1444 (size=12001) 
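Column family A in this table goes through DefaultMobStoreFlusher and the mobdir path above because it is MOB-enabled: larger cell values are written to separate MOB files and the regular store file keeps reference cells. The test's actual table descriptor is not shown in this log, but a sketch of declaring such a family, with a hypothetical MOB threshold, could look like:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)        // values above the threshold go to MOB files under mobdir
                  .setMobThreshold(100L)      // hypothetical threshold in bytes
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}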
2024-12-08T00:21:18,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/da5cd855884a495dadc2e07938549328 2024-12-08T00:21:18,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/50fd0e762c4144e68c610dd845fad2bb is 50, key is test_row_0/C:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:18,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742269_1445 (size=12001) 2024-12-08T00:21:18,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:18,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T00:21:18,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:18,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:18,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617338578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617338578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617338580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617338581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617338581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:18,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T00:21:18,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:18,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:18,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:18,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:18,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/50fd0e762c4144e68c610dd845fad2bb 2024-12-08T00:21:18,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/72337da115a24f76881634791b7b8751 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751 2024-12-08T00:21:18,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751, entries=200, sequenceid=95, filesize=38.6 K 2024-12-08T00:21:18,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/da5cd855884a495dadc2e07938549328 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328 2024-12-08T00:21:18,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T00:21:18,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/50fd0e762c4144e68c610dd845fad2bb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb 2024-12-08T00:21:18,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T00:21:18,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 46a39620848480c2f6f28f4fa1ea64a8 in 874ms, sequenceid=95, compaction requested=true 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:18,802 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:18,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:21:18,802 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:18,803 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:18,803 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:18,803 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:18,803 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/46e9a37b699140719030fa899d62d1c5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.3 K 2024-12-08T00:21:18,804 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:18,804 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:18,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=99.2 K 2024-12-08T00:21:18,804 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,804 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751] 2024-12-08T00:21:18,804 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 46e9a37b699140719030fa899d62d1c5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276288 2024-12-08T00:21:18,804 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d0f2c340dc34b6b93dd355eae93947f, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276288 2024-12-08T00:21:18,805 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c1ed2aa072d341c6a9a35b922aecad3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617276930 2024-12-08T00:21:18,805 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4ddf32f53ce460b8361ec3739351c2b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617276930 2024-12-08T00:21:18,805 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72337da115a24f76881634791b7b8751, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277609 2024-12-08T00:21:18,805 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting da5cd855884a495dadc2e07938549328, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277616 2024-12-08T00:21:18,811 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:18,813 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#377 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:18,813 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/05bc8cf86dea466dba32cd1f1164f421 is 50, key is test_row_0/B:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:18,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:18,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T00:21:18,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:18,815 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T00:21:18,816 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208f1041de5a1e34b64959fc2d225ca61d1_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:18,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:18,817 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208f1041de5a1e34b64959fc2d225ca61d1_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:18,818 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f1041de5a1e34b64959fc2d225ca61d1_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742270_1446 (size=12207) 2024-12-08T00:21:18,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d609ebb944164450b565447816a216f1_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617277959/Put/seqid=0 2024-12-08T00:21:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742271_1447 (size=4469) 2024-12-08T00:21:18,846 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#378 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:18,846 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/bbc23103258d42caaa6524b96b65c1c6 is 175, key is test_row_0/A:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:18,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742272_1448 (size=12154) 2024-12-08T00:21:18,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:18,862 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d609ebb944164450b565447816a216f1_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d609ebb944164450b565447816a216f1_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:18,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3b23fb5c7616424fb55261e096154cc3, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:18,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3b23fb5c7616424fb55261e096154cc3 is 175, key is test_row_0/A:col10/1733617277959/Put/seqid=0 2024-12-08T00:21:18,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742273_1449 (size=31161) 2024-12-08T00:21:18,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742274_1450 (size=30955) 2024-12-08T00:21:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:19,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:19,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617339095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617339096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617339098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617339101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617339102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617339203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617339204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617339205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617339205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617339208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,226 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/05bc8cf86dea466dba32cd1f1164f421 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/05bc8cf86dea466dba32cd1f1164f421 2024-12-08T00:21:19,231 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 05bc8cf86dea466dba32cd1f1164f421(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:19,231 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:19,231 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617278802; duration=0sec 2024-12-08T00:21:19,231 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:19,231 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:19,231 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:19,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:19,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:19,232 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:19,232 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e7031ba6eb49401698271504ffe1960e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.3 K 2024-12-08T00:21:19,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e7031ba6eb49401698271504ffe1960e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617276288 2024-12-08T00:21:19,233 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a3f21c7339cb4cc4a7dceeacbd746d2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617276930 2024-12-08T00:21:19,233 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 50fd0e762c4144e68c610dd845fad2bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277616 2024-12-08T00:21:19,239 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
46a39620848480c2f6f28f4fa1ea64a8#C#compaction#380 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:19,240 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e58f925e1483440a836addb7a26ca68c is 50, key is test_row_0/C:col10/1733617277616/Put/seqid=0 2024-12-08T00:21:19,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742275_1451 (size=12207) 2024-12-08T00:21:19,254 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e58f925e1483440a836addb7a26ca68c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e58f925e1483440a836addb7a26ca68c 2024-12-08T00:21:19,258 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into e58f925e1483440a836addb7a26ca68c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:19,258 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:19,258 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617278802; duration=0sec 2024-12-08T00:21:19,258 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:19,258 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:19,275 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3b23fb5c7616424fb55261e096154cc3 2024-12-08T00:21:19,278 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/bbc23103258d42caaa6524b96b65c1c6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6 2024-12-08T00:21:19,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/0f6aac3b862b4002a802362c13542ef5 is 50, key is test_row_0/B:col10/1733617277959/Put/seqid=0 2024-12-08T00:21:19,291 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into bbc23103258d42caaa6524b96b65c1c6(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:19,291 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:19,291 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617278802; duration=0sec 2024-12-08T00:21:19,291 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:19,291 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:19,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742276_1452 (size=12001) 2024-12-08T00:21:19,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617339407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617339408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617339410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617339411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617339416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:19,698 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/0f6aac3b862b4002a802362c13542ef5 2024-12-08T00:21:19,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/9984117ab6c44fcdb62f2dc93cfb304a is 50, key is test_row_0/C:col10/1733617277959/Put/seqid=0 2024-12-08T00:21:19,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742277_1453 (size=12001) 2024-12-08T00:21:19,716 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/9984117ab6c44fcdb62f2dc93cfb304a 2024-12-08T00:21:19,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617339714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617339714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617339714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617339715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3b23fb5c7616424fb55261e096154cc3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3 2024-12-08T00:21:19,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:19,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617339721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:19,726 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3, entries=150, sequenceid=117, filesize=30.2 K 2024-12-08T00:21:19,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/0f6aac3b862b4002a802362c13542ef5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5 2024-12-08T00:21:19,730 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5, entries=150, sequenceid=117, filesize=11.7 K 2024-12-08T00:21:19,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/9984117ab6c44fcdb62f2dc93cfb304a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a 2024-12-08T00:21:19,734 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a, entries=150, sequenceid=117, filesize=11.7 K 2024-12-08T00:21:19,735 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 46a39620848480c2f6f28f4fa1ea64a8 in 920ms, 
sequenceid=117, compaction requested=false 2024-12-08T00:21:19,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:19,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:19,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-08T00:21:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-08T00:21:19,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-08T00:21:19,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3860 sec 2024-12-08T00:21:19,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.3910 sec 2024-12-08T00:21:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:20,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:20,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:20,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b6b412d720c048a4b0dfe4bdbe5d0b57_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:20,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742278_1454 (size=14694) 2024-12-08T00:21:20,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617340242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617340245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617340246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617340247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617340248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617340349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617340351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617340357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617340357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617340357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T00:21:20,461 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-08T00:21:20,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:20,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-08T00:21:20,465 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:20,465 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:20,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:20,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617340554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617340556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617340560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617340561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617340561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:20,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:20,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:20,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,637 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:20,640 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b6b412d720c048a4b0dfe4bdbe5d0b57_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b6b412d720c048a4b0dfe4bdbe5d0b57_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:20,641 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/94bb316903ed47fab5dcec0d594ab19c, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:20,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/94bb316903ed47fab5dcec0d594ab19c is 175, key is test_row_0/A:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:20,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742279_1455 (size=39649) 2024-12-08T00:21:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:20,771 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:20,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617340856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617340862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617340865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617340865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:20,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617340866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,923 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:20,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:20,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:20,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:20,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:20,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,047 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/94bb316903ed47fab5dcec0d594ab19c 2024-12-08T00:21:21,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/25fb4a3f891a4a1ea44eb0a845b2858b is 50, key is test_row_0/B:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:21,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742280_1456 (size=12101) 2024-12-08T00:21:21,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/25fb4a3f891a4a1ea44eb0a845b2858b 2024-12-08T00:21:21,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:21,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/c8986d7937c2444fbfcf4b65e32d3033 is 50, key is test_row_0/C:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:21,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742281_1457 
(size=12101) 2024-12-08T00:21:21,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:21,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,228 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:21,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:21,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:21,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617341361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:21,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617341371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:21,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617341374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:21,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617341374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:21,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617341376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:21,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:21,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/c8986d7937c2444fbfcf4b65e32d3033 2024-12-08T00:21:21,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/94bb316903ed47fab5dcec0d594ab19c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c 2024-12-08T00:21:21,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c, entries=200, sequenceid=135, filesize=38.7 K 2024-12-08T00:21:21,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/25fb4a3f891a4a1ea44eb0a845b2858b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b 2024-12-08T00:21:21,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b, entries=150, 
sequenceid=135, filesize=11.8 K 2024-12-08T00:21:21,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/c8986d7937c2444fbfcf4b65e32d3033 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033 2024-12-08T00:21:21,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033, entries=150, sequenceid=135, filesize=11.8 K 2024-12-08T00:21:21,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 46a39620848480c2f6f28f4fa1ea64a8 in 1266ms, sequenceid=135, compaction requested=true 2024-12-08T00:21:21,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:21,490 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:21,490 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:21,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:21,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:21,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:21,491 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in 
TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,491 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/05bc8cf86dea466dba32cd1f1164f421, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.5 K 2024-12-08T00:21:21,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101765 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:21,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:21,491 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,491 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=99.4 K 2024-12-08T00:21:21,491 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c] 2024-12-08T00:21:21,492 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 05bc8cf86dea466dba32cd1f1164f421, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277616 2024-12-08T00:21:21,492 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbc23103258d42caaa6524b96b65c1c6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277616 2024-12-08T00:21:21,492 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b23fb5c7616424fb55261e096154cc3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617277959 2024-12-08T00:21:21,492 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f6aac3b862b4002a802362c13542ef5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617277959 2024-12-08T00:21:21,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 25fb4a3f891a4a1ea44eb0a845b2858b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:21,493 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94bb316903ed47fab5dcec0d594ab19c, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:21,500 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:21,500 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:21,501 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/2f05f8f6986a44ab8e085afd16b1cebb is 50, key is test_row_0/B:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:21,502 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412087e7c0a75031f4a959179f3c6d6a1e14b_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:21,504 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412087e7c0a75031f4a959179f3c6d6a1e14b_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:21,505 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087e7c0a75031f4a959179f3c6d6a1e14b_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:21,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742282_1458 (size=12409) 2024-12-08T00:21:21,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742283_1459 (size=4469) 2024-12-08T00:21:21,512 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#387 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:21,512 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e887e0001a9541df99dab83d706c1c3c is 175, key is test_row_0/A:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:21,513 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/2f05f8f6986a44ab8e085afd16b1cebb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2f05f8f6986a44ab8e085afd16b1cebb 2024-12-08T00:21:21,518 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 2f05f8f6986a44ab8e085afd16b1cebb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:21,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:21,518 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617281490; duration=0sec 2024-12-08T00:21:21,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:21,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:21,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:21,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742284_1460 (size=31363) 2024-12-08T00:21:21,519 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:21,519 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:21,519 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:21,519 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e58f925e1483440a836addb7a26ca68c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.5 K 2024-12-08T00:21:21,520 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e58f925e1483440a836addb7a26ca68c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733617277616 2024-12-08T00:21:21,520 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9984117ab6c44fcdb62f2dc93cfb304a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617277959 2024-12-08T00:21:21,521 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c8986d7937c2444fbfcf4b65e32d3033, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:21,525 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#388 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:21,526 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/9b359f923f6b4cccb6b82f054c95e14e is 50, key is test_row_0/C:col10/1733617279085/Put/seqid=0 2024-12-08T00:21:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742285_1461 (size=12409) 2024-12-08T00:21:21,532 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/9b359f923f6b4cccb6b82f054c95e14e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9b359f923f6b4cccb6b82f054c95e14e 2024-12-08T00:21:21,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:21,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T00:21:21,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:21,534 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:21,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:21,536 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into 9b359f923f6b4cccb6b82f054c95e14e(size=12.1 
K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:21,536 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:21,536 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617281490; duration=0sec 2024-12-08T00:21:21,536 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:21,536 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:21,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208baf8ed55df934f9f8b3de2a8b4e7b924_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617280244/Put/seqid=0 2024-12-08T00:21:21,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742286_1462 (size=12304) 2024-12-08T00:21:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:21,923 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e887e0001a9541df99dab83d706c1c3c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c 2024-12-08T00:21:21,927 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into e887e0001a9541df99dab83d706c1c3c(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:21,927 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:21,927 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617281489; duration=0sec 2024-12-08T00:21:21,927 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:21,927 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:21,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:21,947 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208baf8ed55df934f9f8b3de2a8b4e7b924_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208baf8ed55df934f9f8b3de2a8b4e7b924_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:21,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3a823eaa8ec54fa1825864edef4d0af5, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3a823eaa8ec54fa1825864edef4d0af5 is 175, key is test_row_0/A:col10/1733617280244/Put/seqid=0 2024-12-08T00:21:21,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742287_1463 (size=31105) 2024-12-08T00:21:22,354 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3a823eaa8ec54fa1825864edef4d0af5 2024-12-08T00:21:22,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/dcce2c892476415e9c6b7038ec13c375 is 50, key is test_row_0/B:col10/1733617280244/Put/seqid=0 2024-12-08T00:21:22,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742288_1464 (size=12151) 2024-12-08T00:21:22,370 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/dcce2c892476415e9c6b7038ec13c375 2024-12-08T00:21:22,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/da98f1a22f8549a09bb42e9c7af05e07 is 50, key is test_row_0/C:col10/1733617280244/Put/seqid=0 2024-12-08T00:21:22,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:22,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:22,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742289_1465 (size=12151) 2024-12-08T00:21:22,394 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/da98f1a22f8549a09bb42e9c7af05e07 2024-12-08T00:21:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617342392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/3a823eaa8ec54fa1825864edef4d0af5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5 2024-12-08T00:21:22,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617342393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617342394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617342395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,403 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5, entries=150, sequenceid=157, filesize=30.4 K 2024-12-08T00:21:22,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617342397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/dcce2c892476415e9c6b7038ec13c375 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375 2024-12-08T00:21:22,409 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375, entries=150, sequenceid=157, filesize=11.9 K 2024-12-08T00:21:22,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/da98f1a22f8549a09bb42e9c7af05e07 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07 2024-12-08T00:21:22,413 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07, entries=150, sequenceid=157, filesize=11.9 K 2024-12-08T00:21:22,413 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 46a39620848480c2f6f28f4fa1ea64a8 in 879ms, sequenceid=157, compaction requested=false 2024-12-08T00:21:22,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:22,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): 
Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:22,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-08T00:21:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-08T00:21:22,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-08T00:21:22,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9490 sec 2024-12-08T00:21:22,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.9550 sec 2024-12-08T00:21:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:22,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:22,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:22,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208383118e4278f424880d00fe743c66829_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:22,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742290_1466 (size=14794) 2024-12-08T00:21:22,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617342521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617342522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617342526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617342527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617342528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T00:21:22,569 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-08T00:21:22,571 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-08T00:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T00:21:22,572 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:22,573 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:22,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:22,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617342629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617342629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617342634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617342636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617342636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T00:21:22,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:22,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:22,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:22,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:22,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:22,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:22,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:22,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617342834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617342835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617342839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617342840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617342841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T00:21:22,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:22,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:22,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:22,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:22,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:22,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:22,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:22,912 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:22,916 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208383118e4278f424880d00fe743c66829_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208383118e4278f424880d00fe743c66829_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:22,916 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/41afc61063684884a81a161c0a1a0741, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:22,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/41afc61063684884a81a161c0a1a0741 is 175, key is test_row_0/A:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:22,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742291_1467 (size=39749) 2024-12-08T00:21:22,927 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/41afc61063684884a81a161c0a1a0741 2024-12-08T00:21:22,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/59145572a6cb484bb84548a2cd029822 is 50, key is test_row_0/B:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:22,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742292_1468 (size=12151) 2024-12-08T00:21:23,030 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:23,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:23,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:23,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617343139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617343141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617343145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617343145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617343146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T00:21:23,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:23,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:23,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/59145572a6cb484bb84548a2cd029822 2024-12-08T00:21:23,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/d990293695e74820bc37d6886294776f is 50, key is test_row_0/C:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742293_1469 (size=12151) 2024-12-08T00:21:23,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:23,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:23,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:23,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617343643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617343646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617343651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617343653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:23,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617343654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T00:21:23,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/d990293695e74820bc37d6886294776f 2024-12-08T00:21:23,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/41afc61063684884a81a161c0a1a0741 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741 2024-12-08T00:21:23,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741, entries=200, sequenceid=175, filesize=38.8 K 2024-12-08T00:21:23,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/59145572a6cb484bb84548a2cd029822 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822 2024-12-08T00:21:23,759 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822, entries=150, sequenceid=175, filesize=11.9 K 2024-12-08T00:21:23,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/d990293695e74820bc37d6886294776f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f 2024-12-08T00:21:23,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f, entries=150, sequenceid=175, filesize=11.9 K 2024-12-08T00:21:23,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 46a39620848480c2f6f28f4fa1ea64a8 in 1264ms, sequenceid=175, compaction requested=true 2024-12-08T00:21:23,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:23,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:23,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:23,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:23,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:23,765 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:23,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:23,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:21:23,765 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:23,765 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:23,765 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:23,765 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:23,765 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:23,766 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=99.8 K 2024-12-08T00:21:23,766 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,766 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741] 2024-12-08T00:21:23,766 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2f05f8f6986a44ab8e085afd16b1cebb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.9 K 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e887e0001a9541df99dab83d706c1c3c, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f05f8f6986a44ab8e085afd16b1cebb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a823eaa8ec54fa1825864edef4d0af5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733617280244 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dcce2c892476415e9c6b7038ec13c375, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733617280244 2024-12-08T00:21:23,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41afc61063684884a81a161c0a1a0741, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282390 2024-12-08T00:21:23,767 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 59145572a6cb484bb84548a2cd029822, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282392 2024-12-08T00:21:23,773 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:23,774 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/3017892959a64dcc918247e039a4c503 is 50, key is test_row_0/B:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:23,775 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:23,777 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412084e8b37ef37ce407b92d36eb29f2adaa7_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:23,779 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412084e8b37ef37ce407b92d36eb29f2adaa7_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:23,779 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084e8b37ef37ce407b92d36eb29f2adaa7_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:23,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742294_1470 (size=12561) 2024-12-08T00:21:23,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:23,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
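The entries above show the master repeatedly re-dispatching the remote flush procedure (pid=115) while the region server declines it with "NOT flushing ... as already flushing"; immediately below, the flush of all three column families finally starts. For reference, a minimal sketch of how a table flush can be requested through the public Admin API follows. It assumes a reachable cluster configured via whatever hbase-site.xml is on the classpath; only the table name is taken from this log, and everything else is illustrative.

// Minimal sketch: ask the cluster to flush every region of the test table.
// Assumes the default client configuration on the classpath. In this log the
// corresponding server-side work appears as the FlushRegionCallable
// procedure (pid=115) executed on the region server.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush request covering all regions of TestAcidGuarantees.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}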
2024-12-08T00:21:23,795 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:23,795 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/3017892959a64dcc918247e039a4c503 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/3017892959a64dcc918247e039a4c503 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:23,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:23,800 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 3017892959a64dcc918247e039a4c503(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
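While the region is over its blocking memstore limit, client mutations are turned away with the RegionTooBusyException entries seen throughout this excerpt, and they succeed again once flushes like the one above bring the memstore back under the limit. The loop below is only a crude client-side illustration of that retry behavior: the stock HBase client already retries RegionTooBusyException on its own, paced by hbase.client.pause and capped by hbase.client.retries.number, so the explicit attempt limit and backoff here are assumptions for illustration. Table, row, family, and qualifier names are taken from the log.

// Illustrative only: a put retried on RegionTooBusyException. The normal
// HBase client performs equivalent retries internally; this loop just makes
// that behavior visible. Attempt limit and backoff are assumed values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once the region is no longer over its limit
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e; // give up after an assumed maximum number of attempts
          }
          Thread.sleep(100L * attempt); // simple linear backoff, assumed pacing
        }
      }
    }
  }
}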
2024-12-08T00:21:23,800 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:23,800 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617283765; duration=0sec 2024-12-08T00:21:23,801 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:23,801 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:23,801 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:23,801 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:23,802 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:23,802 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:23,802 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9b359f923f6b4cccb6b82f054c95e14e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=35.9 K 2024-12-08T00:21:23,802 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b359f923f6b4cccb6b82f054c95e14e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733617279085 2024-12-08T00:21:23,802 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting da98f1a22f8549a09bb42e9c7af05e07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733617280244 2024-12-08T00:21:23,803 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d990293695e74820bc37d6886294776f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282392 2024-12-08T00:21:23,811 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
46a39620848480c2f6f28f4fa1ea64a8#C#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:23,811 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/03cd4b9630554c94baefc22d48d07dd1 is 50, key is test_row_0/C:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:23,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742295_1471 (size=4469) 2024-12-08T00:21:23,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083003fd13a243404c9275ba33d6290ce1_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:23,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742296_1472 (size=12561) 2024-12-08T00:21:23,829 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/03cd4b9630554c94baefc22d48d07dd1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/03cd4b9630554c94baefc22d48d07dd1 2024-12-08T00:21:23,833 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into 03cd4b9630554c94baefc22d48d07dd1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
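The two minor compactions above each merge three flushed store files (for B and then C) into a single file of about 12.3 K, selected by the ExploringCompactionPolicy noted in the log. The 512.0 K figure in the RegionTooBusyException entries is, in a stock HBase build, the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The exact values used by this test run are not visible in this excerpt, so the sketch below uses assumed numbers chosen only to show how a 512 K blocking limit can arise.

// Sketch with assumed values: a 128 KB flush size and the default block
// multiplier of 4 give the 512 KB blocking limit reported in the
// RegionTooBusyException entries above. Property names are real HBase keys;
// the concrete values are assumptions, not read from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // stock default
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 524288 bytes = 512 KB
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}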
2024-12-08T00:21:23,833 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:23,833 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617283765; duration=0sec 2024-12-08T00:21:23,833 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:23,833 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742297_1473 (size=12304) 2024-12-08T00:21:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:23,839 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083003fd13a243404c9275ba33d6290ce1_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083003fd13a243404c9275ba33d6290ce1_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/a5d209127d2241eca7da4a7d055cb33c, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:23,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/a5d209127d2241eca7da4a7d055cb33c is 175, key is test_row_0/A:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742298_1474 (size=31105) 2024-12-08T00:21:23,845 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/a5d209127d2241eca7da4a7d055cb33c 2024-12-08T00:21:23,853 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/5f0b5ee06a70496f9443b8655d1f3e95 is 50, key is test_row_0/B:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:23,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742299_1475 (size=12151) 2024-12-08T00:21:23,857 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/5f0b5ee06a70496f9443b8655d1f3e95 2024-12-08T00:21:23,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/717ebb6a63f74efdb5a716be2a4d8ab1 is 50, key is test_row_0/C:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:23,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742300_1476 (size=12151) 2024-12-08T00:21:24,215 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#396 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:24,216 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/49d32d8e5ea5404ea5f33537c16ae619 is 175, key is test_row_0/A:col10/1733617282392/Put/seqid=0 2024-12-08T00:21:24,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742301_1477 (size=31515) 2024-12-08T00:21:24,269 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/717ebb6a63f74efdb5a716be2a4d8ab1 2024-12-08T00:21:24,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/a5d209127d2241eca7da4a7d055cb33c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c 2024-12-08T00:21:24,280 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c, entries=150, sequenceid=195, filesize=30.4 K 2024-12-08T00:21:24,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/5f0b5ee06a70496f9443b8655d1f3e95 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95 2024-12-08T00:21:24,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,284 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95, entries=150, sequenceid=195, filesize=11.9 K
2024-12-08T00:21:24,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/717ebb6a63f74efdb5a716be2a4d8ab1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1
2024-12-08T00:21:24,289 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1, entries=150, sequenceid=195, filesize=11.9 K
2024-12-08T00:21:24,290 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for 46a39620848480c2f6f28f4fa1ea64a8 in 495ms, sequenceid=195, compaction requested=false
2024-12-08T00:21:24,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8:
2024-12-08T00:21:24,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:24,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115
2024-12-08T00:21:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=115
2024-12-08T00:21:24,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114
2024-12-08T00:21:24,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7180 sec
2024-12-08T00:21:24,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.7300 sec
2024-12-08T00:21:24,285..00:21:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker -- identical message logged repeatedly by the three RPC handlers throughout this interval
2024-12-08T00:21:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=36703) with timestamps from 2024-12-08T00:21:24,354 through 2024-12-08T00:21:24,406: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-08T00:21:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG entry repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=36703) from 2024-12-08T00:21:24,441 through 2024-12-08T00:21:24,490; duplicates elided ...]
2024-12-08T00:21:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
(same DEBUG message repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port=36703 from 2024-12-08T00:21:24,528 through 2024-12-08T00:21:24,587)
2024-12-08T00:21:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
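The DEBUG entries in this stretch are a single message repeated by the RPC handler threads: StoreFileTrackerFactory resolving the store file tracker implementation for each request, which in this run is always DefaultStoreFileTracker. As a rough illustration of that kind of config-driven, reflective factory, the sketch below reads an implementation class name from a Hadoop Configuration, logs the choice, and instantiates it. It is a sketch only; the Tracker interface, the DefaultTracker class, and the configuration key are hypothetical and do not reflect the real StoreFileTrackerFactory source.

```java
import org.apache.hadoop.conf.Configuration;

public final class TrackerFactorySketch {
  // Stand-in for the tracker abstraction; purely illustrative.
  public interface Tracker { }

  // Stand-in default implementation; not the real DefaultStoreFileTracker.
  public static final class DefaultTracker implements Tracker { }

  // Hypothetical configuration key; the real key name is not taken from this log.
  static final String TRACKER_IMPL_KEY = "example.storefile.tracker.impl";

  /** Read the implementation class from the configuration and instantiate it reflectively. */
  static Tracker create(Configuration conf) throws ReflectiveOperationException {
    String className = conf.get(TRACKER_IMPL_KEY, DefaultTracker.class.getName());
    // This is the point where a line like "instantiating StoreFileTracker impl <class>" gets logged.
    System.out.println("instantiating tracker impl " + className);
    return (Tracker) Class.forName(className).getDeclaredConstructor().newInstance();
  }

  public static void main(String[] args) throws ReflectiveOperationException {
    create(new Configuration(false)); // no key set, so the built-in default is used
  }
}
```

With an empty Configuration the factory falls back to its built-in default, which mirrors the fallback behaviour the repeated log line suggests.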
2024-12-08T00:21:24,625 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/49d32d8e5ea5404ea5f33537c16ae619 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619
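The Committing line above records the compacted output being moved from the region's .tmp/A directory into the A store directory. Below is a minimal sketch of that write-to-temp-then-rename pattern, using only the public Hadoop FileSystem API and made-up local paths; it does not mirror HRegionFileSystem internals.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitSketch {
  /** Move a fully written temp file into its final store location; fail loudly if the rename is refused. */
  static void commit(FileSystem fs, Path tmpFile, Path storeFile) throws IOException {
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("rename failed: " + tmpFile + " -> " + storeFile);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // local filesystem unless fs.defaultFS points at HDFS
    Path tmp = new Path("region/.tmp/A/file1"); // hypothetical layout, not the paths from the log
    Path dst = new Path("region/A/file1");
    fs.mkdirs(tmp.getParent());
    fs.create(tmp).close(); // stand-in for the compacted output being written
    fs.mkdirs(dst.getParent());
    commit(fs, tmp, dst);
    System.out.println("committed " + tmp + " as " + dst);
  }
}
```

Renaming a fully written file into place, rather than writing it in the store directory directly, is what keeps readers from ever observing a partially written store file.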
2024-12-08T00:21:24,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into 49d32d8e5ea5404ea5f33537c16ae619(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:21:24,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8:
2024-12-08T00:21:24,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617283764; duration=0sec
2024-12-08T00:21:24,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:21:24,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A
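Most of the DEBUG volume around these entries is StoreFileTrackerFactory reporting that it instantiated DefaultStoreFileTracker for each store it touched. For reference only, the sketch below shows how the tracker implementation could be pinned in client/site configuration; the property key is an assumption based on recent HBase releases that ship StoreFileTrackerFactory and should be verified against the version that produced this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch: pinning the store file tracker implementation.
// Assumption: recent HBase versions read the choice from
// "hbase.store.file-tracker.impl" (values such as DEFAULT or FILE);
// confirm the exact key for the release behind this log.
public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create(); // loads hbase-default.xml / hbase-site.xml from the classpath
    conf.set("hbase.store.file-tracker.impl", "DEFAULT"); // DEFAULT matches the DefaultStoreFileTracker seen in these entries
    System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
  }
}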
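The entries that follow record a client-requested flush of TestAcidGuarantees being handled by the master as a FlushTableProcedure (pid=114 just completed, pid=116 about to start). As a point of reference, here is a minimal client-side sketch that issues this kind of flush through the standard Admin API; it is illustrative only, not necessarily how TestAcidGuarantees itself drives the cluster, and it assumes cluster settings are available from an hbase-site.xml on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: request a flush of the table named in the log entries below.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed: hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin.flush submits the flush to the master, which runs it as a
      // procedure like the FlushTableProcedure entries that follow.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}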
2024-12-08T00:21:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-08T00:21:24,676 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed
2024-12-08T00:21:24,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T00:21:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees
2024-12-08T00:21:24,679 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:21:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-08T00:21:24,679 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:21:24,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
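The entries just above trace a table flush end to end: the client request reaching the master (port 44717), a FlushTableProcedure stored as pid=116, and a FlushRegionProcedure subprocedure spawned as pid=117; the earlier pid=114 future completing is the client side of the same flow. A minimal client-side sketch, assuming only the standard HBase 2.x client API (configuration and connection handling here are illustrative; only the table name is taken from the log):

// Minimal sketch of a client-side call that produces FLUSH operations like those above.
// Table name comes from the log; connection setup is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master handles this as a flush procedure (seen above as
      // FlushTableProcedure pid=116 with a FlushRegionProcedure child),
      // and the call returns once the procedure reports completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}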
2024-12-08T00:21:24,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated DEBUG records from RpcServer.default.FPBQ.Fifo handlers 0-2 (port 36703), 2024-12-08T00:21:24,761 through 00:21:24,780: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-08T00:21:24,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
[... repeated StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from handlers 0-2 (port 36703), 2024-12-08T00:21:24,780 through 00:21:24,790 ...]
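The repeated StoreFileTrackerFactory(122) records above come from the factory re-resolving the configured tracker class for every store it touches and falling back to DefaultStoreFileTracker when nothing else is configured. Below is a minimal, self-contained sketch of that reflection-based lookup pattern; the interface, classes, and property key are illustrative assumptions for the sketch, not the HBase API.

// Sketch of a configuration-driven factory: read a class name from configuration,
// default to a built-in implementation, and instantiate it reflectively.
// All names here are hypothetical stand-ins for the real StoreFileTrackerFactory.
import java.util.Properties;

public final class TrackerFactorySketch {

    /** Illustrative stand-in for a store file tracker implementation. */
    public interface Tracker {
        String name();
    }

    /** Stand-in for DefaultStoreFileTracker: track store files directly on the file system. */
    public static final class DefaultTracker implements Tracker {
        @Override
        public String name() {
            return "DEFAULT";
        }
    }

    // Hypothetical configuration key; the real factory reads its own HBase setting.
    private static final String TRACKER_IMPL_KEY = "store.file-tracker.impl";

    /** Resolve the tracker class from configuration and instantiate it via reflection. */
    public static Tracker create(Properties conf) throws ReflectiveOperationException {
        String className = conf.getProperty(TRACKER_IMPL_KEY, DefaultTracker.class.getName());
        Class<?> clazz = Class.forName(className);
        return (Tracker) clazz.getDeclaredConstructor().newInstance();
    }

    public static void main(String[] args) throws Exception {
        Properties conf = new Properties(); // no key set, so the default implementation is used
        System.out.println("instantiating StoreFileTracker impl " + create(conf).getClass().getName());
    }
}

The log suggests each RPC handler performs such a lookup per store access, which is why the same DEBUG line repeats for handlers 0, 1 and 2.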
2024-12-08T00:21:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8
2024-12-08T00:21:24,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C
2024-12-08T00:21:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
[... repeated StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from handlers 0 and 2 (port 36703), 2024-12-08T00:21:24,815 through 00:21:24,828, interleaved with the flush records above ...]
2024-12-08T00:21:24,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120833aa31fc662f45c28da43c66df4ff802_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_1/A:col10/1733617284785/Put/seqid=0
[... further StoreFileTrackerFactory(122) DEBUG records from handlers 0 and 2 (port 36703), 2024-12-08T00:21:24,828 through 00:21:24,831, interleaved with the records below ...]
2024-12-08T00:21:24,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:21:24,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117
2024-12-08T00:21:24,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:24,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing
2024-12-08T00:21:24,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:24,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
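The pid=117 failure above is the region server declining a master-requested flush because the MemStoreFlusher is already flushing the same region ("NOT flushing ... as already flushing"); the master-side flush procedure typically just retries. For readers reproducing this against a test cluster, here is a minimal, hedged sketch of triggering a table flush from the client and retrying on such an IOException; it uses standard HBase client APIs, the table name comes from the log, and the retry policy is an assumption.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushWithRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            for (int attempt = 1; ; attempt++) {
                try {
                    admin.flush(table);           // asks the master to flush the table's regions
                    break;
                } catch (IOException e) {
                    // e.g. "Unable to complete flush ..." while a flush is already in progress
                    if (attempt >= 3) {
                        throw e;
                    }
                    Thread.sleep(500L * attempt); // simple linear backoff (illustrative)
                }
            }
        }
    }
}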
2024-12-08T00:21:24,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    [... same eight stack frames as the RSProcedureHandler error above ...]
[... repeated StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from handlers 0 and 2 (port 36703) at 2024-12-08T00:21:24,832 ...]
2024-12-08T00:21:24,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=117
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
[... repeated StoreFileTrackerFactory(122) DEBUG records from handlers 0 and 2 (port 36703), 2024-12-08T00:21:24,833 through 00:21:24,835 ...]
2024-12-08T00:21:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742302_1478 (size=12304)
[... repeated StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from RpcServer.default.FPBQ.Fifo handlers 0 and 2 (port 36703), 2024-12-08T00:21:24,835 through 00:21:24,842 ...]
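In the records that follow, the handlers start rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K"): the region's memstore has grown past its blocking limit (typically hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, which this test has evidently lowered), so writes are pushed back until the in-flight flush drains it. Here is a minimal, hedged client-side sketch of writing with explicit backoff when that back-pressure surfaces; it uses standard HBase client APIs, the row, family, and qualifier come from the log above, the backoff policy is an illustrative assumption, and in practice the stock client normally absorbs these retries itself.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutWithBackoff {

    /** Walk the cause chain to see whether the failure was memstore back-pressure. */
    private static boolean isRegionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof RegionTooBusyException) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                       // write accepted
                } catch (IOException e) {
                    if (!isRegionTooBusy(e) || attempt >= 5) {
                        throw e;                 // not back-pressure, or out of patience
                    }
                    Thread.sleep(backoffMs);     // give the flush time to drain the memstore
                    backoffMs *= 2;              // exponential backoff (illustrative choice)
                }
            }
        }
    }
}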
[... repeated StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from handlers 0 and 2 (port 36703), 2024-12-08T00:21:24,843 through 00:21:24,845 ...]
2024-12-08T00:21:24,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:21:24,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617344866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:24,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
    [... same stack frames as above ...]
2024-12-08T00:21:24,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617344866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:24,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
    [... same stack frames as above ...]
2024-12-08T00:21:24,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617344872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:24,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
    [... same stack frames as above ...]
2024-12-08T00:21:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617344872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:24,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617344873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617344974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617344975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-08T00:21:24,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617344976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,984 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-08T00:21:24,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:24,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:24,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:24,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:24,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:24,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:24,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617344982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:24,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:24,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617344982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,136 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-08T00:21:25,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:25,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:25,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:25,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617345178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617345179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617345183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617345188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617345189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,235 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:25,239 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120833aa31fc662f45c28da43c66df4ff802_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120833aa31fc662f45c28da43c66df4ff802_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:25,240 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/623eb1ce58f14aa69d161cd1424bdfe9, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/623eb1ce58f14aa69d161cd1424bdfe9 is 175, key is test_row_1/A:col10/1733617284785/Put/seqid=0 2024-12-08T00:21:25,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742303_1479 (size=31101) 2024-12-08T00:21:25,244 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/623eb1ce58f14aa69d161cd1424bdfe9 2024-12-08T00:21:25,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/bf8649cbafae4762893882d8617c0fac is 50, key is test_row_1/B:col10/1733617284785/Put/seqid=0 2024-12-08T00:21:25,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742304_1480 
(size=9757) 2024-12-08T00:21:25,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/bf8649cbafae4762893882d8617c0fac 2024-12-08T00:21:25,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/601ccc044f6b4d8db8970cc84ac17c0f is 50, key is test_row_1/C:col10/1733617284785/Put/seqid=0 2024-12-08T00:21:25,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-08T00:21:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742305_1481 (size=9757) 2024-12-08T00:21:25,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/601ccc044f6b4d8db8970cc84ac17c0f 2024-12-08T00:21:25,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/623eb1ce58f14aa69d161cd1424bdfe9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9 2024-12-08T00:21:25,289 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-08T00:21:25,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:25,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:25,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:25,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:25,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9, entries=150, sequenceid=211, filesize=30.4 K 2024-12-08T00:21:25,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/bf8649cbafae4762893882d8617c0fac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac 2024-12-08T00:21:25,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac, entries=100, sequenceid=211, filesize=9.5 K 2024-12-08T00:21:25,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/601ccc044f6b4d8db8970cc84ac17c0f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f 2024-12-08T00:21:25,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f, entries=100, sequenceid=211, filesize=9.5 K 
2024-12-08T00:21:25,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 46a39620848480c2f6f28f4fa1ea64a8 in 488ms, sequenceid=211, compaction requested=true 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:25,303 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:25,303 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:25,304 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,304 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:25,304 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/3017892959a64dcc918247e039a4c503, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=33.7 K 2024-12-08T00:21:25,304 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=91.5 K 2024-12-08T00:21:25,304 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9] 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49d32d8e5ea5404ea5f33537c16ae619, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282392 2024-12-08T00:21:25,304 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3017892959a64dcc918247e039a4c503, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282392 2024-12-08T00:21:25,305 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f0b5ee06a70496f9443b8655d1f3e95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733617282519 2024-12-08T00:21:25,305 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5d209127d2241eca7da4a7d055cb33c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733617282519 2024-12-08T00:21:25,305 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bf8649cbafae4762893882d8617c0fac, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617284785 2024-12-08T00:21:25,305 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 623eb1ce58f14aa69d161cd1424bdfe9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617284785 2024-12-08T00:21:25,314 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,318 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:25,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/38fc5d99c6624c04b7763393833400ff is 50, key is test_row_0/B:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:25,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120879e9b7eaf51c453c9640038cd1a3a681_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120879e9b7eaf51c453c9640038cd1a3a681_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120879e9b7eaf51c453c9640038cd1a3a681_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742306_1482 (size=12663) 2024-12-08T00:21:25,341 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/38fc5d99c6624c04b7763393833400ff as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/38fc5d99c6624c04b7763393833400ff 2024-12-08T00:21:25,347 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 38fc5d99c6624c04b7763393833400ff(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
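The PressureAwareThroughputController entries above report each compaction's average throughput against the 50.00 MB/second limit; these files are so small that no sleeps were needed ("slept 0 time(s)"). A minimal, self-contained sketch of that style of throttling, written from scratch rather than taken from the real controller, is:

// Illustrative throughput throttle: sleep just long enough that bytes/second stays under a cap.
// Not the real PressureAwareThroughputController.
public final class ThroughputThrottleSketch {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;

    ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Call after each chunk is written; sleeps only if we are ahead of the allowed rate.
    void control(long justWrittenBytes) throws InterruptedException {
        bytesWritten += justWrittenBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsForBytes = bytesWritten / maxBytesPerSecond;
        if (minSecondsForBytes > elapsedSec) {
            Thread.sleep((long) ((minSecondsForBytes - elapsedSec) * 1000));
        }
    }

    double averageThroughputMBps() {
        double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
        return bytesWritten / elapsedSec / (1024.0 * 1024.0);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch t = new ThroughputThrottleSketch(50 * 1024.0 * 1024.0); // 50 MB/s cap
        t.control(34_469); // roughly the size of the small B-family compaction above
        System.out.printf("average throughput %.2f MB/second%n", t.averageThroughputMBps());
    }
}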
2024-12-08T00:21:25,347 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:25,347 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617285303; duration=0sec 2024-12-08T00:21:25,347 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:25,347 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:25,347 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742307_1483 (size=4469) 2024-12-08T00:21:25,349 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:25,349 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:25,349 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
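The ExploringCompactionPolicy entries record that, with only one permutation to consider, all three eligible files satisfy the size ratio and are selected. A simplified, standalone sketch of that selection idea follows: consider contiguous windows of files, keep only windows where each file is no larger than the ratio times the sum of the others, and prefer more files, then a smaller total. It is an approximation for illustration, not the actual policy class (which also applies min/max file counts, off-peak ratios and size limits).

import java.util.ArrayList;
import java.util.List;

// Simplified exploring-style selection over store file sizes (illustrative only).
public final class ExploringSelectionSketch {
    static List<Long> select(List<Long> sizes, double ratio, int minFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= sizes.size(); end++) {
                List<Long> window = sizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                // "in ratio": every file is at most ratio * (sum of the other files in the window)
                boolean inRatio = window.stream().allMatch(s -> s <= (total - s) * ratio);
                if (!inRatio) continue;
                if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // approximate byte sizes of the three B-family files above (12.3 K, 11.9 K, 9.5 K)
        System.out.println(select(List.of(12_595L, 12_185L, 9_728L), 1.2, 3)); // selects all three
    }
}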
2024-12-08T00:21:25,349 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/03cd4b9630554c94baefc22d48d07dd1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=33.7 K 2024-12-08T00:21:25,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 03cd4b9630554c94baefc22d48d07dd1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733617282392 2024-12-08T00:21:25,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 717ebb6a63f74efdb5a716be2a4d8ab1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733617282519 2024-12-08T00:21:25,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 601ccc044f6b4d8db8970cc84ac17c0f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617284785 2024-12-08T00:21:25,354 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#404 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:25,354 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/369d828985ae461f9c3ce871d260bc8a is 175, key is test_row_0/A:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:25,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742308_1484 (size=31724) 2024-12-08T00:21:25,370 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#406 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:25,370 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/3dffb66baed2475880c5bb0cd9f18baa is 50, key is test_row_0/C:col10/1733617282527/Put/seqid=0 2024-12-08T00:21:25,372 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/369d828985ae461f9c3ce871d260bc8a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a 2024-12-08T00:21:25,377 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into 369d828985ae461f9c3ce871d260bc8a(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:25,377 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:25,377 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617285303; duration=0sec 2024-12-08T00:21:25,377 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:25,377 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742309_1485 (size=12663) 2024-12-08T00:21:25,392 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/3dffb66baed2475880c5bb0cd9f18baa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/3dffb66baed2475880c5bb0cd9f18baa 2024-12-08T00:21:25,396 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into 3dffb66baed2475880c5bb0cd9f18baa(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
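Once the remote flush (pid=117) starts below, incoming Mutate calls trip the region's blocking memstore limit (512.0 K in this test run) and are rejected with RegionTooBusyException; the HBase client normally retries these itself. The following is a hedged, illustrative sketch of handling that exception explicitly on the client side; it uses standard HBase client classes, but the retry policy, backoff schedule and value written are chosen purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: an explicit retry loop around a put that may hit
// RegionTooBusyException while the region is flushing under memstore pressure.
public final class RegionTooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                      // illustrative backoff schedule
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                             // write accepted
                } catch (RegionTooBusyException e) {   // memstore over its blocking limit
                    if (attempt >= 5) throw e;         // give up after a few attempts
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}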
2024-12-08T00:21:25,396 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:25,396 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617285303; duration=0sec 2024-12-08T00:21:25,396 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:25,396 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:25,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-08T00:21:25,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:25,443 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:21:25,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:25,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:25,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120850aa1a3e3e0d49968fbb277f3323e223_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617284869/Put/seqid=0 2024-12-08T00:21:25,457 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742310_1486 (size=12304) 2024-12-08T00:21:25,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:25,462 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120850aa1a3e3e0d49968fbb277f3323e223_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120850aa1a3e3e0d49968fbb277f3323e223_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:25,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4d1b3867c3ed4c448cd60563c9c009e9, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:25,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4d1b3867c3ed4c448cd60563c9c009e9 is 175, key is test_row_0/A:col10/1733617284869/Put/seqid=0 2024-12-08T00:21:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742311_1487 (size=31105) 2024-12-08T00:21:25,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:25,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:25,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617345496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617345499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617345500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617345502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617345502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617345603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617345603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617345609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617345610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617345610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-08T00:21:25,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617345809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617345809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617345816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617345817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617345817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:25,869 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4d1b3867c3ed4c448cd60563c9c009e9 2024-12-08T00:21:25,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ea021a91114f4cf6aa07ba62a9aa0aee is 50, key is test_row_0/B:col10/1733617284869/Put/seqid=0 2024-12-08T00:21:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742312_1488 (size=12151) 2024-12-08T00:21:25,881 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ea021a91114f4cf6aa07ba62a9aa0aee 2024-12-08T00:21:25,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/15bb9a1db9e8474aa20cac30f2c22953 is 50, key is test_row_0/C:col10/1733617284869/Put/seqid=0 2024-12-08T00:21:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742313_1489 (size=12151) 2024-12-08T00:21:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617346115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617346116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617346123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617346125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617346125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:26,291 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/15bb9a1db9e8474aa20cac30f2c22953
2024-12-08T00:21:26,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4d1b3867c3ed4c448cd60563c9c009e9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9
2024-12-08T00:21:26,299 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9, entries=150, sequenceid=237, filesize=30.4 K
2024-12-08T00:21:26,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ea021a91114f4cf6aa07ba62a9aa0aee as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee
2024-12-08T00:21:26,303 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee, entries=150, sequenceid=237, filesize=11.9 K
2024-12-08T00:21:26,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/15bb9a1db9e8474aa20cac30f2c22953 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953
2024-12-08T00:21:26,307 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953, entries=150, sequenceid=237, filesize=11.9 K
2024-12-08T00:21:26,308 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 46a39620848480c2f6f28f4fa1ea64a8 in 865ms, sequenceid=237, compaction requested=false
2024-12-08T00:21:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8:
2024-12-08T00:21:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:26,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117
2024-12-08T00:21:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=117
2024-12-08T00:21:26,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116
2024-12-08T00:21:26,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6310 sec
2024-12-08T00:21:26,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.6340 sec
2024-12-08T00:21:26,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8
2024-12-08T00:21:26,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C
2024-12-08T00:21:26,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:21:26,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120892f957edbfcd4244919d097c38727400_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617286625/Put/seqid=0
2024-12-08T00:21:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742314_1490 (size=17284)
2024-12-08T00:21:26,640 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:21:26,644 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120892f957edbfcd4244919d097c38727400_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120892f957edbfcd4244919d097c38727400_46a39620848480c2f6f28f4fa1ea64a8
2024-12-08T00:21:26,644 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/8253cb1801074d68a2ecfb8780f2db81, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8]
2024-12-08T00:21:26,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/8253cb1801074d68a2ecfb8780f2db81 is 175, key is test_row_0/A:col10/1733617286625/Put/seqid=0
2024-12-08T00:21:26,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742315_1491 (size=48389)
2024-12-08T00:21:26,650 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/8253cb1801074d68a2ecfb8780f2db81
2024-12-08T00:21:26,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4e79adf66f4a4f64bbee89f6accf4426 is 50, key is test_row_0/B:col10/1733617286625/Put/seqid=0
2024-12-08T00:21:26,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617346652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617346654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742316_1492 (size=12151) 2024-12-08T00:21:26,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617346660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617346661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617346661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617346762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617346762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617346768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617346769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:21:26,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617346769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:26,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-08T00:21:26,783 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed
2024-12-08T00:21:26,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T00:21:26,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees
2024-12-08T00:21:26,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-12-08T00:21:26,785 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T00:21:26,786 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T00:21:26,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T00:21:26,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-12-08T00:21:26,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:21:26,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119
2024-12-08T00:21:26,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:26,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing
2024-12-08T00:21:26,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:26,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:21:26,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:21:26,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:26,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617346967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617346968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617346974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617346974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:26,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:26,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617346975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4e79adf66f4a4f64bbee89f6accf4426 2024-12-08T00:21:27,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/4aff4962d7384ef9a7e78389a8329f62 is 50, key is test_row_0/C:col10/1733617286625/Put/seqid=0 2024-12-08T00:21:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742317_1493 (size=12151) 2024-12-08T00:21:27,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T00:21:27,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T00:21:27,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:27,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:27,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:27,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T00:21:27,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:27,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617347273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617347277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617347278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617347279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617347284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T00:21:27,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T00:21:27,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:27,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:27,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/4aff4962d7384ef9a7e78389a8329f62 2024-12-08T00:21:27,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/8253cb1801074d68a2ecfb8780f2db81 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81 2024-12-08T00:21:27,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81, entries=250, sequenceid=252, filesize=47.3 K 2024-12-08T00:21:27,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4e79adf66f4a4f64bbee89f6accf4426 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426 2024-12-08T00:21:27,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426, entries=150, 
sequenceid=252, filesize=11.9 K 2024-12-08T00:21:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/4aff4962d7384ef9a7e78389a8329f62 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62 2024-12-08T00:21:27,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62, entries=150, sequenceid=252, filesize=11.9 K 2024-12-08T00:21:27,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 46a39620848480c2f6f28f4fa1ea64a8 in 870ms, sequenceid=252, compaction requested=true 2024-12-08T00:21:27,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:27,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:27,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:27,495 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:27,496 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:27,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:27,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:27,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:27,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:27,497 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111218 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:27,497 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:27,497 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in 
TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,497 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=108.6 K 2024-12-08T00:21:27,497 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,497 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81] 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 369d828985ae461f9c3ce871d260bc8a, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617282527 2024-12-08T00:21:27,498 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:27,498 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/38fc5d99c6624c04b7763393833400ff, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.1 K 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d1b3867c3ed4c448cd60563c9c009e9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733617284869 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 38fc5d99c6624c04b7763393833400ff, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617282527 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8253cb1801074d68a2ecfb8780f2db81, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285499 2024-12-08T00:21:27,498 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ea021a91114f4cf6aa07ba62a9aa0aee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733617284869 2024-12-08T00:21:27,499 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e79adf66f4a4f64bbee89f6accf4426, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285501 2024-12-08T00:21:27,505 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#413 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:27,505 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/2885e5b36ccb4848996892d0ace31c5e is 50, key is test_row_0/B:col10/1733617286625/Put/seqid=0 2024-12-08T00:21:27,507 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:27,523 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208c6b3d25ce85c4e189d970094ef3e7b41_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:27,525 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208c6b3d25ce85c4e189d970094ef3e7b41_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:27,525 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c6b3d25ce85c4e189d970094ef3e7b41_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742318_1494 (size=12765) 2024-12-08T00:21:27,536 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/2885e5b36ccb4848996892d0ace31c5e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2885e5b36ccb4848996892d0ace31c5e 2024-12-08T00:21:27,540 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 2885e5b36ccb4848996892d0ace31c5e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:27,540 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:27,540 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617287495; duration=0sec 2024-12-08T00:21:27,541 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:27,541 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:27,541 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:27,542 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:27,542 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:27,542 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,542 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/3dffb66baed2475880c5bb0cd9f18baa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.1 K 2024-12-08T00:21:27,542 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dffb66baed2475880c5bb0cd9f18baa, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733617282527 2024-12-08T00:21:27,543 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 15bb9a1db9e8474aa20cac30f2c22953, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733617284869 2024-12-08T00:21:27,543 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4aff4962d7384ef9a7e78389a8329f62, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285501 2024-12-08T00:21:27,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,549 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:27,550 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/eb05a1d583c94f359cd51722c21840b6 is 50, key is test_row_0/C:col10/1733617286625/Put/seqid=0 2024-12-08T00:21:27,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T00:21:27,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:27,551 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742319_1495 (size=4469) 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:27,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:27,553 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#414 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:27,554 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d3f20f97ae38495396a2fe2e5b003cae is 175, key is test_row_0/A:col10/1733617286625/Put/seqid=0 2024-12-08T00:21:27,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742320_1496 (size=12765) 2024-12-08T00:21:27,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742321_1497 (size=31719) 2024-12-08T00:21:27,567 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/eb05a1d583c94f359cd51722c21840b6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/eb05a1d583c94f359cd51722c21840b6 2024-12-08T00:21:27,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ada8482871724bcab81b9c34753a4d95_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617286660/Put/seqid=0 2024-12-08T00:21:27,573 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into eb05a1d583c94f359cd51722c21840b6(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:27,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:27,573 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617287496; duration=0sec 2024-12-08T00:21:27,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:27,573 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:27,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742322_1498 (size=12454) 2024-12-08T00:21:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:27,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:27,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617347794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617347794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617347795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617347798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617347799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T00:21:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617347902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617347902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617347903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617347905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617347905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:27,972 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d3f20f97ae38495396a2fe2e5b003cae as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae 2024-12-08T00:21:27,976 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into d3f20f97ae38495396a2fe2e5b003cae(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:27,976 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:27,976 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617287495; duration=0sec 2024-12-08T00:21:27,976 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:27,976 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:27,980 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ada8482871724bcab81b9c34753a4d95_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ada8482871724bcab81b9c34753a4d95_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:27,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/12818b29a55b4a33bf93aefcbb384b12, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:27,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/12818b29a55b4a33bf93aefcbb384b12 is 175, key is test_row_0/A:col10/1733617286660/Put/seqid=0 2024-12-08T00:21:27,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742323_1499 (size=31255) 2024-12-08T00:21:28,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617348107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617348108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617348111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617348111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617348112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,391 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/12818b29a55b4a33bf93aefcbb384b12 2024-12-08T00:21:28,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/7ac27aac257b42819e702af4c4800049 is 50, key is test_row_0/B:col10/1733617286660/Put/seqid=0 2024-12-08T00:21:28,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742324_1500 (size=12301) 2024-12-08T00:21:28,404 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/7ac27aac257b42819e702af4c4800049 2024-12-08T00:21:28,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/0592b361dbc84878b7cf9f0985277137 is 50, key is test_row_0/C:col10/1733617286660/Put/seqid=0 2024-12-08T00:21:28,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617348416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617348417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617348417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617348419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617348420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742325_1501 (size=12301) 2024-12-08T00:21:28,829 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/0592b361dbc84878b7cf9f0985277137 2024-12-08T00:21:28,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/12818b29a55b4a33bf93aefcbb384b12 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12 2024-12-08T00:21:28,837 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12, entries=150, sequenceid=275, filesize=30.5 K 2024-12-08T00:21:28,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/7ac27aac257b42819e702af4c4800049 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049 2024-12-08T00:21:28,841 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049, entries=150, sequenceid=275, filesize=12.0 K 2024-12-08T00:21:28,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 
{event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/0592b361dbc84878b7cf9f0985277137 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137 2024-12-08T00:21:28,845 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137, entries=150, sequenceid=275, filesize=12.0 K 2024-12-08T00:21:28,846 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 46a39620848480c2f6f28f4fa1ea64a8 in 1295ms, sequenceid=275, compaction requested=false 2024-12-08T00:21:28,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:28,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:28,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-08T00:21:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-08T00:21:28,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-08T00:21:28,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0610 sec 2024-12-08T00:21:28,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.0650 sec 2024-12-08T00:21:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T00:21:28,889 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-08T00:21:28,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-08T00:21:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:28,891 
INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:28,892 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:28,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:28,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:28,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:28,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:28,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120824ed98c0e5d944b98662145f667756e7_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:28,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742326_1502 (size=17534) 2024-12-08T00:21:28,937 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:28,940 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120824ed98c0e5d944b98662145f667756e7_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120824ed98c0e5d944b98662145f667756e7_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:28,941 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b107492b1673490182733c3cf1ffc56e, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:28,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b107492b1673490182733c3cf1ffc56e is 175, key is test_row_0/A:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:28,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742327_1503 (size=48639) 2024-12-08T00:21:28,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617348976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617348976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617348976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617348977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617348977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:29,043 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:29,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617349082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617349082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617349082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617349082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617349083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:29,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:29,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617349287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617349288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617349288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617349290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617349290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:29,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,354 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b107492b1673490182733c3cf1ffc56e 2024-12-08T00:21:29,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/551a1c9d037a4b5b8b01ec6db0c16f1e is 50, key is test_row_0/B:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:29,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742328_1504 (size=12301) 2024-12-08T00:21:29,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:29,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617349590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617349593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617349594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617349594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617349596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,654 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:29,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/551a1c9d037a4b5b8b01ec6db0c16f1e 2024-12-08T00:21:29,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e382e1dcdbd2468b8ab6a88a17ae7452 is 50, key is test_row_0/C:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:29,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742329_1505 (size=12301) 2024-12-08T00:21:29,807 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:29,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:29,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:29,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:29,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:29,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:30,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:30,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617350095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:30,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617350098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:30,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617350103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:30,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617350103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:30,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617350105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,112 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:30,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:30,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:30,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:30,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
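The 512.0 K figure in the RegionTooBusyException warnings above is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values this particular run uses are not visible in this excerpt; as a hedged illustration, a 128 KB flush size with the default multiplier of 4 works out to exactly the logged 512 K limit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed flush size of 128 KB (illustrative; the test's actual setting is not shown in this log).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // New writes are rejected once the memstore reaches flush.size * multiplier = 512 KB,
    // matching the "Over memstore limit=512.0 K" threshold in the warnings above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}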
2024-12-08T00:21:30,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
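Those over-limit rejections are transient: the server sheds writes while the memstore drains to disk, and the client is expected to back off and resend (the stock HBase client handles this internally through its retry machinery). A hand-rolled equivalent might look like the following sketch; the retry count and backoff values are illustrative, not taken from this log:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BusyRegionRetry {
  // Retries a put that the region rejects for being over its blocking memstore limit.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100;                      // illustrative starting backoff
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {  // retryable: memstore above its blocking limit
        if (attempt >= 10) {
          throw busy;                          // give up after a bounded number of attempts
        }
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000);
      }
    }
  }
}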
2024-12-08T00:21:30,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e382e1dcdbd2468b8ab6a88a17ae7452 2024-12-08T00:21:30,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/b107492b1673490182733c3cf1ffc56e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e 2024-12-08T00:21:30,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e, entries=250, sequenceid=293, filesize=47.5 K 2024-12-08T00:21:30,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/551a1c9d037a4b5b8b01ec6db0c16f1e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e 2024-12-08T00:21:30,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e, entries=150, sequenceid=293, filesize=12.0 K 2024-12-08T00:21:30,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/e382e1dcdbd2468b8ab6a88a17ae7452 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452 2024-12-08T00:21:30,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452, entries=150, sequenceid=293, filesize=12.0 K 2024-12-08T00:21:30,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 46a39620848480c2f6f28f4fa1ea64a8 in 1277ms, sequenceid=293, compaction requested=true 2024-12-08T00:21:30,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:30,200 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:30,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:21:30,201 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:30,201 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111613 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:30,201 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:30,202 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:30,202 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=109.0 K 2024-12-08T00:21:30,202 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:30,202 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e] 2024-12-08T00:21:30,202 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d3f20f97ae38495396a2fe2e5b003cae, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285501 2024-12-08T00:21:30,203 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 12818b29a55b4a33bf93aefcbb384b12, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733617286658 2024-12-08T00:21:30,203 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:30,203 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:30,203 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:30,203 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2885e5b36ccb4848996892d0ace31c5e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.5 K 2024-12-08T00:21:30,203 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b107492b1673490182733c3cf1ffc56e, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287793 2024-12-08T00:21:30,204 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2885e5b36ccb4848996892d0ace31c5e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285501 2024-12-08T00:21:30,204 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ac27aac257b42819e702af4c4800049, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733617286658 2024-12-08T00:21:30,205 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
551a1c9d037a4b5b8b01ec6db0c16f1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287798 2024-12-08T00:21:30,212 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:30,223 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208257beb5bf95d4a91bb8f4e24a7050daa_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:30,224 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:30,225 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/a9028ef319a848758dff537108761473 is 50, key is test_row_0/B:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:30,225 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208257beb5bf95d4a91bb8f4e24a7050daa_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:30,226 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208257beb5bf95d4a91bb8f4e24a7050daa_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:30,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742330_1506 (size=13017) 2024-12-08T00:21:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742331_1507 (size=4469) 2024-12-08T00:21:30,248 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#422 average throughput is 0.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:30,249 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4b10f1a9132d48febcd8cbdd858cc56b is 175, key is test_row_0/A:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:30,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742332_1508 (size=31971) 2024-12-08T00:21:30,264 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:30,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:30,265 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:30,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081d965d2eb91743548b42b58c41558300_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617288975/Put/seqid=0 2024-12-08T00:21:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742333_1509 (size=12454) 2024-12-08T00:21:30,635 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/a9028ef319a848758dff537108761473 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/a9028ef319a848758dff537108761473 2024-12-08T00:21:30,640 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into a9028ef319a848758dff537108761473(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:30,640 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:30,640 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617290201; duration=0sec 2024-12-08T00:21:30,640 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:30,640 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:30,640 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:30,641 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:30,641 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:30,641 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:30,641 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/eb05a1d583c94f359cd51722c21840b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.5 K 2024-12-08T00:21:30,642 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb05a1d583c94f359cd51722c21840b6, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733617285501 2024-12-08T00:21:30,642 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0592b361dbc84878b7cf9f0985277137, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733617286658 2024-12-08T00:21:30,643 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e382e1dcdbd2468b8ab6a88a17ae7452, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287798 2024-12-08T00:21:30,648 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#425 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:30,649 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/046950187e2d45d0adfb4203d2b7f04a is 50, key is test_row_0/C:col10/1733617288923/Put/seqid=0 2024-12-08T00:21:30,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742334_1510 (size=13017) 2024-12-08T00:21:30,656 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/4b10f1a9132d48febcd8cbdd858cc56b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b 2024-12-08T00:21:30,661 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into 4b10f1a9132d48febcd8cbdd858cc56b(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
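The selections logged above ("3 files of size 37367 ... with 1 in ratio") come from the in-ratio test the exploring policy applies to each candidate set: every file must be no larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other candidates. A simplified sketch of that check under default settings, not the full ExploringCompactionPolicy:

import java.util.List;

public final class CompactionRatioCheck {
  // True if every file passes the size-ratio test: size <= ratio * sum(sizes of the other files).
  static boolean inRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three B-store files selected above (12.5 K + 12.0 K + 12.0 K).
    List<Long> sizes = List.of(12_800L, 12_288L, 12_288L);
    System.out.println(inRatio(sizes, 1.2));   // true -> all three files are compacted together
  }
}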
2024-12-08T00:21:30,661 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:30,661 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617290200; duration=0sec 2024-12-08T00:21:30,661 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:30,661 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:30,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:30,682 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081d965d2eb91743548b42b58c41558300_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d965d2eb91743548b42b58c41558300_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:30,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/95826a910b2f49fcb3fe1827d0b61d22, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:30,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/95826a910b2f49fcb3fe1827d0b61d22 is 175, key is test_row_0/A:col10/1733617288975/Put/seqid=0 2024-12-08T00:21:30,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742335_1511 (size=31255) 2024-12-08T00:21:30,688 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/95826a910b2f49fcb3fe1827d0b61d22 2024-12-08T00:21:30,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/01f49b19a7144015a5e510309cf3440a is 50, key is test_row_0/B:col10/1733617288975/Put/seqid=0 2024-12-08T00:21:30,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742336_1512 (size=12301) 2024-12-08T00:21:30,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:31,057 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/046950187e2d45d0adfb4203d2b7f04a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/046950187e2d45d0adfb4203d2b7f04a 2024-12-08T00:21:31,060 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into 046950187e2d45d0adfb4203d2b7f04a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:31,060 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:31,060 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617290201; duration=0sec 2024-12-08T00:21:31,060 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:31,060 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:31,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:31,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:31,111 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/01f49b19a7144015a5e510309cf3440a 2024-12-08T00:21:31,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/491bbcb79dc247429965548ff01b8320 is 50, key is test_row_0/C:col10/1733617288975/Put/seqid=0 2024-12-08T00:21:31,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617351125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617351126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617351127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617351128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742337_1513 (size=12301) 2024-12-08T00:21:31,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617351129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,133 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/491bbcb79dc247429965548ff01b8320 2024-12-08T00:21:31,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/95826a910b2f49fcb3fe1827d0b61d22 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22 2024-12-08T00:21:31,140 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22, entries=150, sequenceid=313, filesize=30.5 K 2024-12-08T00:21:31,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/01f49b19a7144015a5e510309cf3440a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a 2024-12-08T00:21:31,144 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a, entries=150, sequenceid=313, filesize=12.0 K 2024-12-08T00:21:31,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/491bbcb79dc247429965548ff01b8320 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320 2024-12-08T00:21:31,148 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320, entries=150, sequenceid=313, filesize=12.0 K 2024-12-08T00:21:31,149 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 46a39620848480c2f6f28f4fa1ea64a8 in 884ms, sequenceid=313, compaction requested=false 2024-12-08T00:21:31,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:31,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:31,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-08T00:21:31,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-08T00:21:31,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-08T00:21:31,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2580 sec 2024-12-08T00:21:31,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.2630 sec 2024-12-08T00:21:31,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:31,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:31,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:31,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084768058e262f4935a268c70edd5615a9_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:31,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742338_1514 (size=14994) 2024-12-08T00:21:31,252 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:31,256 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084768058e262f4935a268c70edd5615a9_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084768058e262f4935a268c70edd5615a9_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:31,257 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d102726ec07340bd94c38cec2fddbd8b, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:31,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d102726ec07340bd94c38cec2fddbd8b is 175, key is test_row_0/A:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:31,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617351251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617351255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617351256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617351258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617351261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742339_1515 (size=39949) 2024-12-08T00:21:31,277 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d102726ec07340bd94c38cec2fddbd8b 2024-12-08T00:21:31,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ff6f07bc58294f60a9bb2ae341c22b4a is 50, key is test_row_0/B:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:31,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742340_1516 (size=12301) 2024-12-08T00:21:31,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617351362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617351362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617351363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617351363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617351367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617351566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617351568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617351568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617351568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617351573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ff6f07bc58294f60a9bb2ae341c22b4a 2024-12-08T00:21:31,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/18e2578783fd4f17a90c478508f024b8 is 50, key is test_row_0/C:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:31,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742341_1517 (size=12301) 2024-12-08T00:21:31,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617351869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617351875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617351877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617351877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:31,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617351878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/18e2578783fd4f17a90c478508f024b8 2024-12-08T00:21:32,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/d102726ec07340bd94c38cec2fddbd8b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b 2024-12-08T00:21:32,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b, entries=200, sequenceid=334, filesize=39.0 K 2024-12-08T00:21:32,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/ff6f07bc58294f60a9bb2ae341c22b4a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a 2024-12-08T00:21:32,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T00:21:32,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/18e2578783fd4f17a90c478508f024b8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8 2024-12-08T00:21:32,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T00:21:32,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 46a39620848480c2f6f28f4fa1ea64a8 in 887ms, sequenceid=334, compaction requested=true 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:32,119 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:32,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:32,119 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:32,120 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:32,120 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor 
compaction (all files) 2024-12-08T00:21:32,120 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:32,120 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=100.8 K 2024-12-08T00:21:32,120 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:32,120 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b] 2024-12-08T00:21:32,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:32,120 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:32,121 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
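The ExploringCompactionPolicy entries above report selecting "3 files of size 103175 ... with 1 in ratio". The size-ratio test behind that phrase is commonly described as: a candidate set is "in ratio" when no single file is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files in the set. Below is a minimal standalone Java sketch of that test, not HBase's actual ExploringCompactionPolicy code; the class name and the exact byte counts are illustrative, chosen only to roughly match the ~31.2 K, ~30.5 K and ~39.0 K A-store files listed above.

    import java.util.Arrays;

    public class CompactionRatioCheck {
        // A candidate set is "in ratio" when no file is bigger than ratio * (sum of the other files).
        static boolean filesInRatio(long[] sizes, double ratio) {
            long total = Arrays.stream(sizes).sum();
            for (long size : sizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the three A-store files above (~31.2 K, ~30.5 K, ~39.0 K; total about 103175 bytes).
            long[] storeA = {31949, 31232, 39994};
            System.out.println(filesInRatio(storeA, 1.2));  // true: all three can be picked together
        }
    }

With three files of comparable size the check passes, which matches the single 3-file minor compaction queued per store in the entries above.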
2024-12-08T00:21:32,121 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b10f1a9132d48febcd8cbdd858cc56b, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287798 2024-12-08T00:21:32,121 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/a9028ef319a848758dff537108761473, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.7 K 2024-12-08T00:21:32,121 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95826a910b2f49fcb3fe1827d0b61d22, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733617288948 2024-12-08T00:21:32,121 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a9028ef319a848758dff537108761473, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287798 2024-12-08T00:21:32,121 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d102726ec07340bd94c38cec2fddbd8b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:32,121 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 01f49b19a7144015a5e510309cf3440a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733617288948 2024-12-08T00:21:32,122 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ff6f07bc58294f60a9bb2ae341c22b4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:32,127 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:32,129 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208f6e786f5dd444c9baac7697622efefa0_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:32,130 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#432 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:32,130 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/fe293ece20124ee7a8e8b4427b15b6f2 is 50, key is test_row_0/B:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:32,131 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208f6e786f5dd444c9baac7697622efefa0_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:32,131 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f6e786f5dd444c9baac7697622efefa0_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:32,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742343_1519 (size=4469) 2024-12-08T00:21:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742342_1518 (size=13119) 2024-12-08T00:21:32,146 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/fe293ece20124ee7a8e8b4427b15b6f2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/fe293ece20124ee7a8e8b4427b15b6f2 2024-12-08T00:21:32,150 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into fe293ece20124ee7a8e8b4427b15b6f2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
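The throttle.PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") reflect compaction throughput control: the compactor accounts for the bytes it writes and sleeps whenever its running rate would exceed the configured limit, so background compactions do not starve foreground I/O. The sketch below is a deliberately simplified stand-in for that idea, not HBase's PressureAwareThroughputController; the class and method names are made up for illustration.

    public class SimpleThroughputLimiter {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten;

        public SimpleThroughputLimiter(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        /** Account for len bytes just written; sleep if the running rate exceeds the limit. */
        public void control(long len) throws InterruptedException {
            bytesWritten += len;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minSec = bytesWritten / limitBytesPerSec;  // how long this many bytes should take
            if (minSec > elapsedSec) {
                Thread.sleep((long) ((minSec - elapsedSec) * 1000.0));
            }
        }

        public static void main(String[] args) throws InterruptedException {
            // 50 MB/s limit, matching the "total limit is 50.00 MB/second" lines above.
            SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0 * 1024 * 1024);
            for (int i = 0; i < 100; i++) {
                limiter.control(1024 * 1024);  // pretend we wrote a 1 MB chunk of compaction output
            }
        }
    }

In the log the compactions finish without throttling ("slept 0 time(s)") because a ~37 K output never approaches the 50 MB/second limit.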
2024-12-08T00:21:32,150 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:32,150 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=13, startTime=1733617292119; duration=0sec 2024-12-08T00:21:32,150 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:32,150 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:32,150 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:32,151 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:32,151 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:32,151 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:32,151 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/046950187e2d45d0adfb4203d2b7f04a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=36.7 K 2024-12-08T00:21:32,152 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 046950187e2d45d0adfb4203d2b7f04a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733617287798 2024-12-08T00:21:32,152 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 491bbcb79dc247429965548ff01b8320, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733617288948 2024-12-08T00:21:32,153 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 18e2578783fd4f17a90c478508f024b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:32,158 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
46a39620848480c2f6f28f4fa1ea64a8#C#compaction#433 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:32,159 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/272d46cb24a6483fb5ec787dc25cffc8 is 50, key is test_row_0/C:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:32,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742344_1520 (size=13119) 2024-12-08T00:21:32,166 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/272d46cb24a6483fb5ec787dc25cffc8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/272d46cb24a6483fb5ec787dc25cffc8 2024-12-08T00:21:32,171 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into 272d46cb24a6483fb5ec787dc25cffc8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:32,171 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:32,171 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=13, startTime=1733617292119; duration=0sec 2024-12-08T00:21:32,171 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:32,171 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:32,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:32,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:32,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083a35fae033b640ea9ed799c319fd02c7_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617291257/Put/seqid=0 2024-12-08T00:21:32,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742345_1521 (size=14994) 2024-12-08T00:21:32,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617352396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617352397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617352396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617352401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617352401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617352502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617352506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617352506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617352507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617352507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,538 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#431 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:32,539 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e8f158cf3b104a5e9c02f5e18058bfb0 is 175, key is test_row_0/A:col10/1733617291231/Put/seqid=0 2024-12-08T00:21:32,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742346_1522 (size=32073) 2024-12-08T00:21:32,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617352706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617352711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617352714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617352714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:32,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617352714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:32,793 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:32,797 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083a35fae033b640ea9ed799c319fd02c7_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083a35fae033b640ea9ed799c319fd02c7_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:32,797 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/426f553d47c14f82814d752f5c526cf9, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:32,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/426f553d47c14f82814d752f5c526cf9 is 175, key is test_row_0/A:col10/1733617291257/Put/seqid=0 2024-12-08T00:21:32,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742347_1523 (size=39949) 2024-12-08T00:21:32,951 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e8f158cf3b104a5e9c02f5e18058bfb0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0 2024-12-08T00:21:32,962 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into e8f158cf3b104a5e9c02f5e18058bfb0(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
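The repeated RegionTooBusyException warnings above are the region blocking writers once its memstore passes the blocking limit (512.0 K here, which would be consistent with a deliberately small test flush size multiplied by the default hbase.hregion.memstore.block.multiplier of 4, though the test's exact settings are not shown in this log). Writers are expected to back off and retry while MemStoreFlusher drains the region, which is what the test clients keep doing. A hedged client-side sketch of that retry loop follows, using the public HBase client API; the row, family and qualifier mirror the test data above, but the backoff values are illustrative, and in practice the stock client already retries this exception internally (it may then surface wrapped in a RetriesExhaustedException rather than directly).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;  // illustrative starting backoff
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return;  // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Region is above its memstore blocking limit; wait for the flush to drain it.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
                throw new IOException("region stayed too busy after 10 attempts");
            }
        }
    }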
2024-12-08T00:21:32,962 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:32,962 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=13, startTime=1733617292119; duration=0sec 2024-12-08T00:21:32,962 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:32,962 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:32,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T00:21:32,996 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-08T00:21:32,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-08T00:21:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T00:21:32,999 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:33,001 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:33,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:33,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617353012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617353015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617353018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617353018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T00:21:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617353020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335
2024-12-08T00:21:33,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-08T00:21:33,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:21:33,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-08T00:21:33,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:33,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing
2024-12-08T00:21:33,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.
2024-12-08T00:21:33,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:21:33,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:21:33,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:21:33,205 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/426f553d47c14f82814d752f5c526cf9
2024-12-08T00:21:33,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/530bc85f2a1d45818b495d48a7b77672 is 50, key is test_row_0/B:col10/1733617291257/Put/seqid=0
2024-12-08T00:21:33,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742348_1524 (size=12301)
2024-12-08T00:21:33,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/530bc85f2a1d45818b495d48a7b77672
2024-12-08T00:21:33,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/12618561106947d8bcb84f9ac7e90228 is 50, key is test_row_0/C:col10/1733617291257/Put/seqid=0
2024-12-08T00:21:33,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742349_1525 (size=12301)
2024-12-08T00:21:33,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=355 (bloomFilter=true),
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/12618561106947d8bcb84f9ac7e90228 2024-12-08T00:21:33,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/426f553d47c14f82814d752f5c526cf9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9 2024-12-08T00:21:33,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9, entries=200, sequenceid=355, filesize=39.0 K 2024-12-08T00:21:33,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/530bc85f2a1d45818b495d48a7b77672 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672 2024-12-08T00:21:33,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672, entries=150, sequenceid=355, filesize=12.0 K 2024-12-08T00:21:33,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/12618561106947d8bcb84f9ac7e90228 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228 2024-12-08T00:21:33,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228, entries=150, sequenceid=355, filesize=12.0 K 2024-12-08T00:21:33,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 46a39620848480c2f6f28f4fa1ea64a8 in 881ms, sequenceid=355, compaction requested=false 2024-12-08T00:21:33,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T00:21:33,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:33,309 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:33,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:33,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083478fc8925924ffc814dc02ce2a7afc4_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617292400/Put/seqid=0 2024-12-08T00:21:33,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742350_1526 (size=12454) 2024-12-08T00:21:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:33,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:33,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617353540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617353540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617353541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617353544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617353545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T00:21:33,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617353650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617353650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617353651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617353651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617353654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:33,742 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083478fc8925924ffc814dc02ce2a7afc4_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083478fc8925924ffc814dc02ce2a7afc4_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/51a649d9d307429793138300381100dc, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/51a649d9d307429793138300381100dc is 175, key is test_row_0/A:col10/1733617292400/Put/seqid=0 2024-12-08T00:21:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742351_1527 (size=31255) 2024-12-08T00:21:33,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617353853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617353853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617353853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617353854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:33,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617353860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T00:21:34,148 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/51a649d9d307429793138300381100dc 2024-12-08T00:21:34,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/70bc9ecc1be749cdbc6a8132caa11a11 is 50, key is test_row_0/B:col10/1733617292400/Put/seqid=0 2024-12-08T00:21:34,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742352_1528 (size=12301) 2024-12-08T00:21:34,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617354157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617354157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617354157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617354158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617354164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,557 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/70bc9ecc1be749cdbc6a8132caa11a11 2024-12-08T00:21:34,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/2a11577cb4914abea0983e053c655361 is 50, key is test_row_0/C:col10/1733617292400/Put/seqid=0 2024-12-08T00:21:34,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742353_1529 (size=12301) 2024-12-08T00:21:34,568 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/2a11577cb4914abea0983e053c655361 2024-12-08T00:21:34,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/51a649d9d307429793138300381100dc as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc 2024-12-08T00:21:34,576 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc, entries=150, sequenceid=373, filesize=30.5 K 2024-12-08T00:21:34,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/70bc9ecc1be749cdbc6a8132caa11a11 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11 2024-12-08T00:21:34,580 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:21:34,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/2a11577cb4914abea0983e053c655361 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361 2024-12-08T00:21:34,584 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:21:34,585 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 46a39620848480c2f6f28f4fa1ea64a8 in 1276ms, sequenceid=373, compaction requested=true 2024-12-08T00:21:34,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:34,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
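[Annotation] The interleaved WARN/DEBUG entries above show the RPC handler threads rejecting Mutate calls with RegionTooBusyException while region 46a39620848480c2f6f28f4fa1ea64a8 sits over its 512.0 K memstore blocking limit, and the flush that completes here (seqid=373, ~93.93 KB flushed) is what eventually drains it. As a rough illustration of how a writer can ride out such rejections, below is a minimal client-side sketch in Java. The table name and row key are taken from the log; the column family "A", qualifier, value, attempt count and backoff values are illustrative assumptions, and note that the stock HBase client already retries RegionTooBusyException internally (governed by hbase.client.retries.number and hbase.client.pause), typically surfacing it only once its own retries are exhausted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Column family "A" matches one of the stores seen in the log; qualifier/value are made up.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          // The region server is blocking updates until a flush drains the memstore;
          // back off and try again (the built-in client retry normally does this for you).
          Thread.sleep(backoffMs);
          backoffMs *= 2;                   // simple exponential backoff
        }
      }
    }
  }
}
```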
2024-12-08T00:21:34,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-08T00:21:34,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-08T00:21:34,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-08T00:21:34,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5840 sec 2024-12-08T00:21:34,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.5890 sec 2024-12-08T00:21:34,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:34,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:34,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:34,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085f438bf1b70e481abd7021c54947ed63_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:34,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742354_1530 (size=17534) 2024-12-08T00:21:34,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617354685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617354685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617354686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617354687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617354688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617354791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617354793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617354794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617354794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617354794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:34,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:34,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617354994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617354998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617354999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617355001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617355002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,080 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:35,083 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085f438bf1b70e481abd7021c54947ed63_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085f438bf1b70e481abd7021c54947ed63_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:35,084 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/602c1b45352a4c08bc26af2afd3b0136, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:35,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/602c1b45352a4c08bc26af2afd3b0136 is 175, key is test_row_0/A:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:35,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742355_1531 (size=48639) 2024-12-08T00:21:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=122 2024-12-08T00:21:35,106 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-08T00:21:35,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-08T00:21:35,109 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:35,109 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:35,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:35,141 DEBUG [Thread-1876 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:62287 2024-12-08T00:21:35,141 DEBUG [Thread-1876 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:35,142 DEBUG [Thread-1872 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:62287 2024-12-08T00:21:35,142 DEBUG [Thread-1872 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:35,142 DEBUG [Thread-1878 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:62287 2024-12-08T00:21:35,142 DEBUG [Thread-1878 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:35,147 DEBUG [Thread-1874 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:62287 2024-12-08T00:21:35,147 DEBUG [Thread-1874 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:35,149 DEBUG [Thread-1880 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:62287 2024-12-08T00:21:35,149 DEBUG [Thread-1880 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:35,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:35,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
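[Annotation] Here the client thread (Thread-1871) observes flush procedure 122 complete, and the master immediately stores a new FlushTableProcedure (pid=124) whose FlushRegionProcedure subprocedure (pid=125) is dispatched to 017dd09fb407,36703. A table flush of this kind can be requested from client code through the Admin API; the sketch below is a minimal, hedged example assuming a default-configured connection, not a reproduction of the test harness.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on this build it shows up
      // as a FlushTableProcedure with per-region subprocedures, much like pid=122/123
      // and pid=124/125 in the surrounding log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```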
2024-12-08T00:21:35,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:35,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
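[Annotation] Subprocedure pid=125 fails at this point with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still writing out the previous snapshot; the master records the remote failure and re-dispatches the callable (the retries are visible again at 00:21:35,413 and 00:21:35,566) until the in-flight flush finishes. The "Over memstore limit=512.0 K" threshold that keeps tripping the writers is the per-region blocking limit, i.e. the configured memstore flush size multiplied by the block multiplier. The sketch below only illustrates those two standard configuration keys; the concrete values are assumptions chosen to reproduce the 512 K limit seen in this test, not recommended production settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (128 MB by default); a test-sized value here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Updates are blocked once the memstore reaches flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" in the log.
    System.out.println("Blocking limit (bytes): " + blockingLimit);
  }
}
```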
2024-12-08T00:21:35,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617355297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617355305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617355306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617355308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617355309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:35,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,489 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=394, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/602c1b45352a4c08bc26af2afd3b0136 2024-12-08T00:21:35,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/e3686a9d3e844c02a5765562faa2edce is 50, key is test_row_0/B:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:35,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742356_1532 (size=12301) 2024-12-08T00:21:35,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:35,718 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1733617355798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51278 deadline: 1733617355806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51294 deadline: 1733617355811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51334 deadline: 1733617355811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51310 deadline: 1733617355811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:35,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:35,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:35,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:35,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:35,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/e3686a9d3e844c02a5765562faa2edce 2024-12-08T00:21:35,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/39c3ab6abbc44bdda6c230188aa76737 is 50, key is test_row_0/C:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:35,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742357_1533 (size=12301) 2024-12-08T00:21:36,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:36,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:36,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
as already flushing 2024-12-08T00:21:36,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:36,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:36,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. as already flushing 2024-12-08T00:21:36,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:36,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/39c3ab6abbc44bdda6c230188aa76737 2024-12-08T00:21:36,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/602c1b45352a4c08bc26af2afd3b0136 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136 2024-12-08T00:21:36,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136, entries=250, sequenceid=394, filesize=47.5 K 2024-12-08T00:21:36,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/e3686a9d3e844c02a5765562faa2edce as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce 2024-12-08T00:21:36,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce, entries=150, sequenceid=394, filesize=12.0 K 2024-12-08T00:21:36,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/39c3ab6abbc44bdda6c230188aa76737 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737 2024-12-08T00:21:36,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737, entries=150, sequenceid=394, filesize=12.0 K 2024-12-08T00:21:36,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 46a39620848480c2f6f28f4fa1ea64a8 in 1654ms, sequenceid=394, compaction requested=true 2024-12-08T00:21:36,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 46a39620848480c2f6f28f4fa1ea64a8:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:36,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:36,321 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:36,321 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151916 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:36,321 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/A is initiating minor compaction (all files) 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/B is initiating minor compaction (all files) 2024-12-08T00:21:36,322 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/A in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,322 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/B in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,322 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=148.4 K 2024-12-08T00:21:36,322 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/fe293ece20124ee7a8e8b4427b15b6f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=48.8 K 2024-12-08T00:21:36,322 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136] 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting fe293ece20124ee7a8e8b4427b15b6f2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8f158cf3b104a5e9c02f5e18058bfb0, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 530bc85f2a1d45818b495d48a7b77672, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733617291257 2024-12-08T00:21:36,322 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 426f553d47c14f82814d752f5c526cf9, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733617291253 2024-12-08T00:21:36,323 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 70bc9ecc1be749cdbc6a8132caa11a11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617292395 2024-12-08T00:21:36,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51a649d9d307429793138300381100dc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617292395 2024-12-08T00:21:36,323 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e3686a9d3e844c02a5765562faa2edce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733617293543 2024-12-08T00:21:36,323 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 602c1b45352a4c08bc26af2afd3b0136, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733617293540 2024-12-08T00:21:36,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:36,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-08T00:21:36,329 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,329 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T00:21:36,329 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:36,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:36,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:36,332 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#B#compaction#443 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:36,333 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4e40dbea41bd42a1af8e2e050ac0c95f is 50, key is test_row_0/B:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:36,333 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120826fd101e8fb34643b7369275bff7db51_46a39620848480c2f6f28f4fa1ea64a8 store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:36,341 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120826fd101e8fb34643b7369275bff7db51_46a39620848480c2f6f28f4fa1ea64a8, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:36,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a273412759334b1b90d6b3de8278165b_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617294683/Put/seqid=0 2024-12-08T00:21:36,341 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120826fd101e8fb34643b7369275bff7db51_46a39620848480c2f6f28f4fa1ea64a8 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:36,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742358_1534 (size=13255) 2024-12-08T00:21:36,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742360_1536 (size=4469) 2024-12-08T00:21:36,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742359_1535 (size=12454) 2024-12-08T00:21:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:36,357 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a273412759334b1b90d6b3de8278165b_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a273412759334b1b90d6b3de8278165b_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:36,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 
{event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e65f4b50cd294cde85d591ede5c3e8d9, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e65f4b50cd294cde85d591ede5c3e8d9 is 175, key is test_row_0/A:col10/1733617294683/Put/seqid=0 2024-12-08T00:21:36,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742361_1537 (size=31255) 2024-12-08T00:21:36,361 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=410, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e65f4b50cd294cde85d591ede5c3e8d9 2024-12-08T00:21:36,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4f9566c9eafe4ee881cc605ef62959f5 is 50, key is test_row_0/B:col10/1733617294683/Put/seqid=0 2024-12-08T00:21:36,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742362_1538 (size=12301) 2024-12-08T00:21:36,747 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4e40dbea41bd42a1af8e2e050ac0c95f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e40dbea41bd42a1af8e2e050ac0c95f 2024-12-08T00:21:36,752 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/B of 46a39620848480c2f6f28f4fa1ea64a8 into 4e40dbea41bd42a1af8e2e050ac0c95f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:36,752 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:36,752 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/B, priority=12, startTime=1733617296321; duration=0sec 2024-12-08T00:21:36,752 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:36,752 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:B 2024-12-08T00:21:36,752 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:36,753 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:36,753 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 46a39620848480c2f6f28f4fa1ea64a8/C is initiating minor compaction (all files) 2024-12-08T00:21:36,754 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 46a39620848480c2f6f28f4fa1ea64a8/C in TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:36,754 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/272d46cb24a6483fb5ec787dc25cffc8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp, totalSize=48.8 K 2024-12-08T00:21:36,754 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#A#compaction#444 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:36,754 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 272d46cb24a6483fb5ec787dc25cffc8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617291124 2024-12-08T00:21:36,754 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 12618561106947d8bcb84f9ac7e90228, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733617291257 2024-12-08T00:21:36,754 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/5f483ef4c94e43848c755e934495910e is 175, key is test_row_0/A:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:36,755 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a11577cb4914abea0983e053c655361, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617292395 2024-12-08T00:21:36,755 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 39c3ab6abbc44bdda6c230188aa76737, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733617293543 2024-12-08T00:21:36,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742363_1539 (size=32209) 2024-12-08T00:21:36,762 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/5f483ef4c94e43848c755e934495910e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5f483ef4c94e43848c755e934495910e 2024-12-08T00:21:36,764 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46a39620848480c2f6f28f4fa1ea64a8#C#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:36,765 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/a9cefa1733694f42b777411348c71b98 is 50, key is test_row_0/C:col10/1733617294666/Put/seqid=0 2024-12-08T00:21:36,766 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/A of 46a39620848480c2f6f28f4fa1ea64a8 into 5f483ef4c94e43848c755e934495910e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:36,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:36,766 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/A, priority=12, startTime=1733617296320; duration=0sec 2024-12-08T00:21:36,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:36,766 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:A 2024-12-08T00:21:36,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742364_1540 (size=13255) 2024-12-08T00:21:36,771 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4f9566c9eafe4ee881cc605ef62959f5 2024-12-08T00:21:36,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/ab27027b6314436ebe42af9518eec283 is 50, key is test_row_0/C:col10/1733617294683/Put/seqid=0 2024-12-08T00:21:36,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742365_1541 (size=12301) 2024-12-08T00:21:36,780 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/ab27027b6314436ebe42af9518eec283 2024-12-08T00:21:36,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/e65f4b50cd294cde85d591ede5c3e8d9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e65f4b50cd294cde85d591ede5c3e8d9 2024-12-08T00:21:36,786 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e65f4b50cd294cde85d591ede5c3e8d9, entries=150, sequenceid=410, filesize=30.5 K 2024-12-08T00:21:36,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 
{event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/4f9566c9eafe4ee881cc605ef62959f5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4f9566c9eafe4ee881cc605ef62959f5 2024-12-08T00:21:36,789 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4f9566c9eafe4ee881cc605ef62959f5, entries=150, sequenceid=410, filesize=12.0 K 2024-12-08T00:21:36,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/ab27027b6314436ebe42af9518eec283 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/ab27027b6314436ebe42af9518eec283 2024-12-08T00:21:36,792 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/ab27027b6314436ebe42af9518eec283, entries=150, sequenceid=410, filesize=12.0 K 2024-12-08T00:21:36,792 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for 46a39620848480c2f6f28f4fa1ea64a8 in 463ms, sequenceid=410, compaction requested=false 2024-12-08T00:21:36,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:36,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
2024-12-08T00:21:36,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-08T00:21:36,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-08T00:21:36,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-08T00:21:36,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6850 sec 2024-12-08T00:21:36,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.6880 sec 2024-12-08T00:21:36,809 DEBUG [Thread-1865 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:62287 2024-12-08T00:21:36,809 DEBUG [Thread-1869 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:62287 2024-12-08T00:21:36,809 DEBUG [Thread-1865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:36,809 DEBUG [Thread-1869 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:36,813 DEBUG [Thread-1861 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3637e4c6 to 127.0.0.1:62287 2024-12-08T00:21:36,813 DEBUG [Thread-1861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:36,813 DEBUG [Thread-1863 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:62287 2024-12-08T00:21:36,813 DEBUG [Thread-1863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:36,821 DEBUG [Thread-1867 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:62287 2024-12-08T00:21:36,821 DEBUG [Thread-1867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:37,172 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/a9cefa1733694f42b777411348c71b98 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a9cefa1733694f42b777411348c71b98 2024-12-08T00:21:37,175 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 46a39620848480c2f6f28f4fa1ea64a8/C of 46a39620848480c2f6f28f4fa1ea64a8 into a9cefa1733694f42b777411348c71b98(size=12.9 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:37,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:37,175 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8., storeName=46a39620848480c2f6f28f4fa1ea64a8/C, priority=12, startTime=1733617296321; duration=0sec 2024-12-08T00:21:37,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:37,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46a39620848480c2f6f28f4fa1ea64a8:C 2024-12-08T00:21:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T00:21:37,214 INFO [Thread-1871 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2424 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7272 rows 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2426 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7278 rows 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2429 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7287 rows 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2433 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7299 rows 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2437 2024-12-08T00:21:37,214 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7311 rows 2024-12-08T00:21:37,214 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:21:37,214 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:62287 2024-12-08T00:21:37,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:21:37,216 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T00:21:37,217 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T00:21:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:37,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:37,222 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617297221"}]},"ts":"1733617297221"} 2024-12-08T00:21:37,223 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T00:21:37,225 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T00:21:37,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:21:37,227 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, UNASSIGN}] 2024-12-08T00:21:37,227 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, UNASSIGN 2024-12-08T00:21:37,228 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:37,228 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:21:37,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; CloseRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:37,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:37,380 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(124): Close 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1681): Closing 46a39620848480c2f6f28f4fa1ea64a8, disabling compactions & flushes 2024-12-08T00:21:37,380 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. after waiting 0 ms 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 2024-12-08T00:21:37,380 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(2837): Flushing 46a39620848480c2f6f28f4fa1ea64a8 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:21:37,380 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=A 2024-12-08T00:21:37,381 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:37,381 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=B 2024-12-08T00:21:37,381 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:37,381 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 46a39620848480c2f6f28f4fa1ea64a8, store=C 2024-12-08T00:21:37,381 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:37,385 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c9f0591b58f645419b573938796c5082_46a39620848480c2f6f28f4fa1ea64a8 is 50, key is test_row_0/A:col10/1733617296812/Put/seqid=0 2024-12-08T00:21:37,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742366_1542 (size=9914) 2024-12-08T00:21:37,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:37,705 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-08T00:21:37,789 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:37,792 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c9f0591b58f645419b573938796c5082_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9f0591b58f645419b573938796c5082_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:37,793 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/7410859d5b4d48b7a47f2b0de7c41687, store: [table=TestAcidGuarantees family=A region=46a39620848480c2f6f28f4fa1ea64a8] 2024-12-08T00:21:37,793 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/7410859d5b4d48b7a47f2b0de7c41687 is 175, key is test_row_0/A:col10/1733617296812/Put/seqid=0 2024-12-08T00:21:37,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742367_1543 (size=22561) 2024-12-08T00:21:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:38,197 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=421, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/7410859d5b4d48b7a47f2b0de7c41687 2024-12-08T00:21:38,202 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/67fab9e0e9284839855024eaa833d261 is 50, key is test_row_0/B:col10/1733617296812/Put/seqid=0 2024-12-08T00:21:38,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742368_1544 (size=9857) 2024-12-08T00:21:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:38,606 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=421 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/67fab9e0e9284839855024eaa833d261 2024-12-08T00:21:38,611 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/cf294406c17a4f1eb29e6ee2c2b156b1 is 50, key is test_row_0/C:col10/1733617296812/Put/seqid=0 2024-12-08T00:21:38,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742369_1545 (size=9857) 2024-12-08T00:21:39,016 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/cf294406c17a4f1eb29e6ee2c2b156b1 2024-12-08T00:21:39,019 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/A/7410859d5b4d48b7a47f2b0de7c41687 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/7410859d5b4d48b7a47f2b0de7c41687 2024-12-08T00:21:39,022 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/7410859d5b4d48b7a47f2b0de7c41687, entries=100, sequenceid=421, filesize=22.0 K 2024-12-08T00:21:39,022 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/B/67fab9e0e9284839855024eaa833d261 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/67fab9e0e9284839855024eaa833d261 2024-12-08T00:21:39,025 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/67fab9e0e9284839855024eaa833d261, entries=100, sequenceid=421, filesize=9.6 K 2024-12-08T00:21:39,025 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/.tmp/C/cf294406c17a4f1eb29e6ee2c2b156b1 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/cf294406c17a4f1eb29e6ee2c2b156b1 2024-12-08T00:21:39,027 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/cf294406c17a4f1eb29e6ee2c2b156b1, entries=100, sequenceid=421, filesize=9.6 K 2024-12-08T00:21:39,028 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 46a39620848480c2f6f28f4fa1ea64a8 in 1648ms, sequenceid=421, compaction requested=true 2024-12-08T00:21:39,029 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136] to archive 2024-12-08T00:21:39,029 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:21:39,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/432fd7fceb644da5bcf136c54d6ab92a 2024-12-08T00:21:39,031 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/c0795185bb464c779a85e71ca0408af8 2024-12-08T00:21:39,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b2f525cde39d4b91977b76a885fcda22 2024-12-08T00:21:39,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5d0f2c340dc34b6b93dd355eae93947f 2024-12-08T00:21:39,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/f4ddf32f53ce460b8361ec3739351c2b 2024-12-08T00:21:39,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/72337da115a24f76881634791b7b8751 2024-12-08T00:21:39,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/bbc23103258d42caaa6524b96b65c1c6 2024-12-08T00:21:39,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3b23fb5c7616424fb55261e096154cc3 2024-12-08T00:21:39,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/94bb316903ed47fab5dcec0d594ab19c 2024-12-08T00:21:39,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e887e0001a9541df99dab83d706c1c3c 2024-12-08T00:21:39,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/3a823eaa8ec54fa1825864edef4d0af5 2024-12-08T00:21:39,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/41afc61063684884a81a161c0a1a0741 2024-12-08T00:21:39,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/49d32d8e5ea5404ea5f33537c16ae619 2024-12-08T00:21:39,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/a5d209127d2241eca7da4a7d055cb33c 2024-12-08T00:21:39,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/369d828985ae461f9c3ce871d260bc8a 2024-12-08T00:21:39,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/623eb1ce58f14aa69d161cd1424bdfe9 2024-12-08T00:21:39,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4d1b3867c3ed4c448cd60563c9c009e9 2024-12-08T00:21:39,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/8253cb1801074d68a2ecfb8780f2db81 2024-12-08T00:21:39,044 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d3f20f97ae38495396a2fe2e5b003cae 2024-12-08T00:21:39,045 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/12818b29a55b4a33bf93aefcbb384b12 2024-12-08T00:21:39,046 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/b107492b1673490182733c3cf1ffc56e 2024-12-08T00:21:39,046 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/4b10f1a9132d48febcd8cbdd858cc56b 2024-12-08T00:21:39,047 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/95826a910b2f49fcb3fe1827d0b61d22 2024-12-08T00:21:39,048 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/d102726ec07340bd94c38cec2fddbd8b 2024-12-08T00:21:39,048 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e8f158cf3b104a5e9c02f5e18058bfb0 2024-12-08T00:21:39,049 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/426f553d47c14f82814d752f5c526cf9 2024-12-08T00:21:39,050 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/51a649d9d307429793138300381100dc 2024-12-08T00:21:39,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/602c1b45352a4c08bc26af2afd3b0136 2024-12-08T00:21:39,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/46e9a37b699140719030fa899d62d1c5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/05bc8cf86dea466dba32cd1f1164f421, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2f05f8f6986a44ab8e085afd16b1cebb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/3017892959a64dcc918247e039a4c503, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/38fc5d99c6624c04b7763393833400ff, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2885e5b36ccb4848996892d0ace31c5e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/a9028ef319a848758dff537108761473, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/fe293ece20124ee7a8e8b4427b15b6f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce] to archive 2024-12-08T00:21:39,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
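Every HFileArchiver move in the entries above and below follows the same mapping: a store file under .../data/default/TestAcidGuarantees/<region>/<family>/<hfile> is relocated to the identical relative path under .../archive/. The following is a minimal Python sketch of that mapping, based only on the source/target pairs visible in these log lines, not on HBase's actual HFileArchiver implementation.

# Illustrative sketch of the data -> archive path mapping seen in the
# HFileArchiver log entries in this run; this is not HBase source code.
def archive_path(store_file_path: str) -> str:
    # Insert "/archive" in front of the first "/data/" path component,
    # matching the layout <root>/data/... -> <root>/archive/data/...
    idx = store_file_path.index("/data/")
    return store_file_path[:idx] + "/archive" + store_file_path[idx:]

# Example taken from one of the entries below:
src = ("hdfs://localhost:46183/user/jenkins/test-data/"
       "93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/"
       "46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5")
assert archive_path(src).endswith(
    "/archive/data/default/TestAcidGuarantees/"
    "46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5")
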
2024-12-08T00:21:39,053 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/d9b9fb42a3744aec8d27da0d042cd1a5 2024-12-08T00:21:39,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/f00285bc3bea43cf8e091e46b36c3625 2024-12-08T00:21:39,055 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/46e9a37b699140719030fa899d62d1c5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/46e9a37b699140719030fa899d62d1c5 2024-12-08T00:21:39,055 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/b1b743ce0c7440ac9ce3c3f5bfefb50c 2024-12-08T00:21:39,056 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/c1ed2aa072d341c6a9a35b922aecad3e 2024-12-08T00:21:39,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/05bc8cf86dea466dba32cd1f1164f421 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/05bc8cf86dea466dba32cd1f1164f421 2024-12-08T00:21:39,058 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/da5cd855884a495dadc2e07938549328 2024-12-08T00:21:39,058 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/0f6aac3b862b4002a802362c13542ef5 2024-12-08T00:21:39,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2f05f8f6986a44ab8e085afd16b1cebb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2f05f8f6986a44ab8e085afd16b1cebb 2024-12-08T00:21:39,060 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/25fb4a3f891a4a1ea44eb0a845b2858b 2024-12-08T00:21:39,061 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/dcce2c892476415e9c6b7038ec13c375 2024-12-08T00:21:39,062 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/3017892959a64dcc918247e039a4c503 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/3017892959a64dcc918247e039a4c503 2024-12-08T00:21:39,063 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/59145572a6cb484bb84548a2cd029822 2024-12-08T00:21:39,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/5f0b5ee06a70496f9443b8655d1f3e95 2024-12-08T00:21:39,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/38fc5d99c6624c04b7763393833400ff to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/38fc5d99c6624c04b7763393833400ff 2024-12-08T00:21:39,065 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/bf8649cbafae4762893882d8617c0fac 2024-12-08T00:21:39,066 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ea021a91114f4cf6aa07ba62a9aa0aee 2024-12-08T00:21:39,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2885e5b36ccb4848996892d0ace31c5e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/2885e5b36ccb4848996892d0ace31c5e 2024-12-08T00:21:39,068 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e79adf66f4a4f64bbee89f6accf4426 2024-12-08T00:21:39,069 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/7ac27aac257b42819e702af4c4800049 2024-12-08T00:21:39,070 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/a9028ef319a848758dff537108761473 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/a9028ef319a848758dff537108761473 2024-12-08T00:21:39,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/551a1c9d037a4b5b8b01ec6db0c16f1e 2024-12-08T00:21:39,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/01f49b19a7144015a5e510309cf3440a 2024-12-08T00:21:39,072 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/fe293ece20124ee7a8e8b4427b15b6f2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/fe293ece20124ee7a8e8b4427b15b6f2 2024-12-08T00:21:39,073 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/ff6f07bc58294f60a9bb2ae341c22b4a 2024-12-08T00:21:39,074 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/530bc85f2a1d45818b495d48a7b77672 2024-12-08T00:21:39,075 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/70bc9ecc1be749cdbc6a8132caa11a11 2024-12-08T00:21:39,076 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/e3686a9d3e844c02a5765562faa2edce 2024-12-08T00:21:39,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e7031ba6eb49401698271504ffe1960e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e58f925e1483440a836addb7a26ca68c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9b359f923f6b4cccb6b82f054c95e14e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/03cd4b9630554c94baefc22d48d07dd1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/3dffb66baed2475880c5bb0cd9f18baa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/eb05a1d583c94f359cd51722c21840b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/046950187e2d45d0adfb4203d2b7f04a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/272d46cb24a6483fb5ec787dc25cffc8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737] to archive 2024-12-08T00:21:39,077 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:21:39,079 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/261732359f6842bbb6a3b7ea60dcfe59 2024-12-08T00:21:39,079 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/10052a952488465f807ca27c14d53f8e 2024-12-08T00:21:39,080 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e7031ba6eb49401698271504ffe1960e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e7031ba6eb49401698271504ffe1960e 2024-12-08T00:21:39,081 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/7e7e42ed3632403a8b7223153ef72e3e 2024-12-08T00:21:39,082 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a3f21c7339cb4cc4a7dceeacbd746d2e 2024-12-08T00:21:39,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e58f925e1483440a836addb7a26ca68c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e58f925e1483440a836addb7a26ca68c 2024-12-08T00:21:39,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/50fd0e762c4144e68c610dd845fad2bb 2024-12-08T00:21:39,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9984117ab6c44fcdb62f2dc93cfb304a 2024-12-08T00:21:39,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9b359f923f6b4cccb6b82f054c95e14e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/9b359f923f6b4cccb6b82f054c95e14e 2024-12-08T00:21:39,086 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/c8986d7937c2444fbfcf4b65e32d3033 2024-12-08T00:21:39,087 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/da98f1a22f8549a09bb42e9c7af05e07 2024-12-08T00:21:39,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/03cd4b9630554c94baefc22d48d07dd1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/03cd4b9630554c94baefc22d48d07dd1 2024-12-08T00:21:39,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/d990293695e74820bc37d6886294776f 2024-12-08T00:21:39,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/717ebb6a63f74efdb5a716be2a4d8ab1 2024-12-08T00:21:39,090 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/3dffb66baed2475880c5bb0cd9f18baa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/3dffb66baed2475880c5bb0cd9f18baa 2024-12-08T00:21:39,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/601ccc044f6b4d8db8970cc84ac17c0f 2024-12-08T00:21:39,092 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/15bb9a1db9e8474aa20cac30f2c22953 2024-12-08T00:21:39,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/eb05a1d583c94f359cd51722c21840b6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/eb05a1d583c94f359cd51722c21840b6 2024-12-08T00:21:39,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/4aff4962d7384ef9a7e78389a8329f62 2024-12-08T00:21:39,095 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/0592b361dbc84878b7cf9f0985277137 2024-12-08T00:21:39,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/046950187e2d45d0adfb4203d2b7f04a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/046950187e2d45d0adfb4203d2b7f04a 2024-12-08T00:21:39,096 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/e382e1dcdbd2468b8ab6a88a17ae7452 2024-12-08T00:21:39,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/491bbcb79dc247429965548ff01b8320 2024-12-08T00:21:39,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/272d46cb24a6483fb5ec787dc25cffc8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/272d46cb24a6483fb5ec787dc25cffc8 2024-12-08T00:21:39,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/18e2578783fd4f17a90c478508f024b8 2024-12-08T00:21:39,100 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/12618561106947d8bcb84f9ac7e90228 2024-12-08T00:21:39,101 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/2a11577cb4914abea0983e053c655361 2024-12-08T00:21:39,101 DEBUG [StoreCloser-TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/39c3ab6abbc44bdda6c230188aa76737 2024-12-08T00:21:39,105 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits/424.seqid, newMaxSeqId=424, maxSeqId=4 2024-12-08T00:21:39,105 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8. 
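The region close above finishes with the recovered.edits marker file 424.seqid being written and the region being closed; every file move leading up to it was reported through the same "backup.HFileArchiver(...): Archived from <kind>, <src> to <dst>" line format. A small, hypothetical Python sketch for summarizing a log like this one by counting archived files per parent directory (the column families A, B and C for the store files above); the regular expression is an assumption inferred only from the line format shown here.

# Hypothetical summarizer for the archiver lines in this log; the pattern is
# inferred from the "Archived from <kind>, <src> to <dst>" entries above.
import re
from collections import Counter

ARCHIVED = re.compile(
    r"backup\.HFileArchiver\(\d+\): Archived from \w+,"
    r"\s+(?P<src>\S+)\s+to\s+(?P<dst>\S+)")

def archived_counts(log_text: str) -> Counter:
    counts = Counter()
    for match in ARCHIVED.finditer(log_text):
        # The component just above the file name is the column family for
        # store files (A/B/C here), or e.g. recovered.edits for edit files.
        parent = match.group("src").rstrip("/").split("/")[-2]
        counts[parent] += 1
    return counts

# Usage (file name is hypothetical):
# archived_counts(open("TestAcidGuarantees-run.log").read())
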
2024-12-08T00:21:39,106 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1635): Region close journal for 46a39620848480c2f6f28f4fa1ea64a8: 2024-12-08T00:21:39,107 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(170): Closed 46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,107 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=46a39620848480c2f6f28f4fa1ea64a8, regionState=CLOSED 2024-12-08T00:21:39,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-08T00:21:39,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseRegionProcedure 46a39620848480c2f6f28f4fa1ea64a8, server=017dd09fb407,36703,1733617179335 in 1.8800 sec 2024-12-08T00:21:39,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-08T00:21:39,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=46a39620848480c2f6f28f4fa1ea64a8, UNASSIGN in 1.8820 sec 2024-12-08T00:21:39,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-08T00:21:39,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8850 sec 2024-12-08T00:21:39,112 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617299112"}]},"ts":"1733617299112"} 2024-12-08T00:21:39,113 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:21:39,115 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:21:39,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8980 sec 2024-12-08T00:21:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-08T00:21:39,326 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-08T00:21:39,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T00:21:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-08T00:21:39,328 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,329 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,331 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits] 2024-12-08T00:21:39,333 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5f483ef4c94e43848c755e934495910e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/5f483ef4c94e43848c755e934495910e 2024-12-08T00:21:39,334 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/7410859d5b4d48b7a47f2b0de7c41687 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/7410859d5b4d48b7a47f2b0de7c41687 2024-12-08T00:21:39,335 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e65f4b50cd294cde85d591ede5c3e8d9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/A/e65f4b50cd294cde85d591ede5c3e8d9 2024-12-08T00:21:39,337 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e40dbea41bd42a1af8e2e050ac0c95f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4e40dbea41bd42a1af8e2e050ac0c95f 2024-12-08T00:21:39,338 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4f9566c9eafe4ee881cc605ef62959f5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/4f9566c9eafe4ee881cc605ef62959f5 
2024-12-08T00:21:39,339 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/67fab9e0e9284839855024eaa833d261 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/B/67fab9e0e9284839855024eaa833d261 2024-12-08T00:21:39,341 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a9cefa1733694f42b777411348c71b98 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/a9cefa1733694f42b777411348c71b98 2024-12-08T00:21:39,342 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/ab27027b6314436ebe42af9518eec283 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/ab27027b6314436ebe42af9518eec283 2024-12-08T00:21:39,342 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/cf294406c17a4f1eb29e6ee2c2b156b1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/C/cf294406c17a4f1eb29e6ee2c2b156b1 2024-12-08T00:21:39,345 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits/424.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8/recovered.edits/424.seqid 2024-12-08T00:21:39,345 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,345 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:21:39,345 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:21:39,346 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T00:21:39,348 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d965d2eb91743548b42b58c41558300_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d965d2eb91743548b42b58c41558300_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,349 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120824ed98c0e5d944b98662145f667756e7_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120824ed98c0e5d944b98662145f667756e7_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,350 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083003fd13a243404c9275ba33d6290ce1_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083003fd13a243404c9275ba33d6290ce1_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,351 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208316357cdf25d4e4f9b8ce3ca6b98cc69_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208316357cdf25d4e4f9b8ce3ca6b98cc69_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,352 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120833aa31fc662f45c28da43c66df4ff802_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120833aa31fc662f45c28da43c66df4ff802_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,353 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083478fc8925924ffc814dc02ce2a7afc4_46a39620848480c2f6f28f4fa1ea64a8 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083478fc8925924ffc814dc02ce2a7afc4_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,354 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208383118e4278f424880d00fe743c66829_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208383118e4278f424880d00fe743c66829_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,355 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083a35fae033b640ea9ed799c319fd02c7_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083a35fae033b640ea9ed799c319fd02c7_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,356 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084768058e262f4935a268c70edd5615a9_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084768058e262f4935a268c70edd5615a9_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,356 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120850aa1a3e3e0d49968fbb277f3323e223_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120850aa1a3e3e0d49968fbb277f3323e223_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,357 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085f438bf1b70e481abd7021c54947ed63_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085f438bf1b70e481abd7021c54947ed63_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,358 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208666bb72144f945b6ad1ea9c8d4622a37_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208666bb72144f945b6ad1ea9c8d4622a37_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,359 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087c6c49c56b6d4de0a95c8d0fbc7064d9_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087c6c49c56b6d4de0a95c8d0fbc7064d9_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,360 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120892f957edbfcd4244919d097c38727400_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120892f957edbfcd4244919d097c38727400_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,361 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089c1425fee5e74608a1f0b635e852fdc6_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089c1425fee5e74608a1f0b635e852fdc6_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,362 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a273412759334b1b90d6b3de8278165b_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a273412759334b1b90d6b3de8278165b_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,362 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ada8482871724bcab81b9c34753a4d95_46a39620848480c2f6f28f4fa1ea64a8 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ada8482871724bcab81b9c34753a4d95_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,363 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b6b412d720c048a4b0dfe4bdbe5d0b57_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b6b412d720c048a4b0dfe4bdbe5d0b57_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,364 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208baf8ed55df934f9f8b3de2a8b4e7b924_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208baf8ed55df934f9f8b3de2a8b4e7b924_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,365 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc7f42ce9c6b4d33908a7a612f87169e_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc7f42ce9c6b4d33908a7a612f87169e_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,366 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9f0591b58f645419b573938796c5082_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9f0591b58f645419b573938796c5082_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,367 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d609ebb944164450b565447816a216f1_46a39620848480c2f6f28f4fa1ea64a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d609ebb944164450b565447816a216f1_46a39620848480c2f6f28f4fa1ea64a8 2024-12-08T00:21:39,367 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:21:39,369 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,370 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:21:39,372 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T00:21:39,373 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,373 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:21:39,373 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617299373"}]},"ts":"9223372036854775807"} 2024-12-08T00:21:39,374 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:21:39,374 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 46a39620848480c2f6f28f4fa1ea64a8, NAME => 'TestAcidGuarantees,,1733617272077.46a39620848480c2f6f28f4fa1ea64a8.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:21:39,374 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
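The DeleteTableProcedure entries above show the master's side of a table drop: store files, mob files and recovered.edits are copied under archive/, the region directory is deleted, and the region rows are removed from hbase:meta. For orientation, a minimal client-side sketch of the drop that triggers such a procedure, assuming a stock HBase 2.x client; connection details come from hbase-site.xml and are not taken from this log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (!admin.isTableDisabled(table)) {
              admin.disableTable(table);  // a table must be disabled before it can be deleted
            }
            admin.deleteTable(table);     // master runs a DeleteTableProcedure like pid=130 above
          }
        }
      }
    }

The archive/ copies are kept so snapshots and backups can still reference the HFiles; removing them later is left to the cleaner chores rather than to the delete itself.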
2024-12-08T00:21:39,374 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617299374"}]},"ts":"9223372036854775807"} 2024-12-08T00:21:39,376 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:21:39,377 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,378 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-12-08T00:21:39,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-08T00:21:39,429 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-08T00:21:39,439 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=240 (was 241), OpenFileDescriptor=461 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 412), ProcessCount=11 (was 11), AvailableMemoryMB=7557 (was 7610) 2024-12-08T00:21:39,448 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=410, ProcessCount=11, AvailableMemoryMB=7556 2024-12-08T00:21:39,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
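The TableDescriptorChecker warning above fires because the test table sets a per-table flush size of 131072 bytes (128 KB), far below the usual 128 MB default; in this test that is deliberate, to force frequent flushes. A sketch of how such an override is expressed with the 2.x descriptor builder; the value is the one reported in the warning, everything around it is illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushSizeSketch {
      // Per-table override of the memstore flush size. 131072 bytes (128 KB) is the
      // value the TableDescriptorChecker flags above; the stock default is 128 MB.
      static TableDescriptor smallFlushSize(TableName name) {
        // Column families (A, B, C in the create request below) still have to be
        // added before such a descriptor can be used with Admin#createTable.
        return TableDescriptorBuilder.newBuilder(name)
            .setMemStoreFlushSize(131072L)
            .build();
      }
    }

The same knob exists cluster-wide as hbase.hregion.memstore.flush.size in hbase-site.xml, which is the alternative the warning text refers to.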
2024-12-08T00:21:39,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:21:39,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:21:39,451 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:21:39,452 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:39,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 131 2024-12-08T00:21:39,452 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:21:39,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:39,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742370_1546 (size=963) 2024-12-08T00:21:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:39,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:39,858 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:21:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742371_1547 (size=53) 2024-12-08T00:21:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:40,263 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:21:40,264 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cc3ed2949e0e40ebaa106781844b31d7, disabling compactions & flushes 2024-12-08T00:21:40,264 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:40,264 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:40,264 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. after waiting 0 ms 2024-12-08T00:21:40,264 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:40,264 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
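The create request at 00:21:39,450 spells out the whole descriptor: table metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identical families A, B and C with VERSIONS => '1'. A rough sketch of issuing the same create through the 2.x client API, assuming the remaining family attributes are left at their defaults (the log prints them explicitly, but they match the defaults):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(name)
            // Table-level metadata seen in the create request: ADAPTIVE in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build()); // master stores a CreateTableProcedure like pid=131 above
        }
      }
    }

The 2.x builders also expose a per-family in-memory compaction setter as an alternative; this test uses the table-level metadata key, which is what later surfaces as compactor=ADAPTIVE when the stores open below.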
2024-12-08T00:21:40,264 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:40,265 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:21:40,265 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617300265"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617300265"}]},"ts":"1733617300265"} 2024-12-08T00:21:40,266 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:21:40,266 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:21:40,266 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617300266"}]},"ts":"1733617300266"} 2024-12-08T00:21:40,267 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:21:40,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, ASSIGN}] 2024-12-08T00:21:40,271 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, ASSIGN 2024-12-08T00:21:40,271 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:21:40,422 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=cc3ed2949e0e40ebaa106781844b31d7, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:40,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; OpenRegionProcedure cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:21:40,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:40,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:40,577 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
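The MetaTableAccessor Put entries above are the catalog rows the create writes: an info:regioninfo/info:state row keyed by the region name and a table:state row keyed by the table name. They can be inspected from any client by scanning hbase:meta directly; a rough sketch, where the prefix scan is a convenience of this example rather than anything the test itself does:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // hbase:meta rows for a table start with "<tableName>,", so a prefix scan finds them.
          Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("TestAcidGuarantees,"));
          scan.addFamily(Bytes.toBytes("info"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result r : scanner) {
              byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
              System.out.println(Bytes.toString(r.getRow())
                  + " state=" + (state == null ? "n/a" : Bytes.toString(state)));
            }
          }
        }
      }
    }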
2024-12-08T00:21:40,577 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7285): Opening region: {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:21:40,577 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,577 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:21:40,577 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7327): checking encryption for cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,577 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7330): checking classloading for cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,578 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,579 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:40,580 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc3ed2949e0e40ebaa106781844b31d7 columnFamilyName A 2024-12-08T00:21:40,580 DEBUG [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:40,580 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(327): Store=cc3ed2949e0e40ebaa106781844b31d7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:40,580 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,581 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:40,581 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc3ed2949e0e40ebaa106781844b31d7 columnFamilyName B 2024-12-08T00:21:40,581 DEBUG [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:40,582 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(327): Store=cc3ed2949e0e40ebaa106781844b31d7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:40,582 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,583 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:21:40,583 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc3ed2949e0e40ebaa106781844b31d7 columnFamilyName C 2024-12-08T00:21:40,583 DEBUG [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:21:40,583 INFO [StoreOpener-cc3ed2949e0e40ebaa106781844b31d7-1 {}] regionserver.HStore(327): Store=cc3ed2949e0e40ebaa106781844b31d7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:21:40,584 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:40,584 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,585 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,586 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:21:40,587 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1085): writing seq id for cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:40,588 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:21:40,589 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1102): Opened cc3ed2949e0e40ebaa106781844b31d7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68615337, jitterRate=0.02244819700717926}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:21:40,589 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1001): Region open journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:40,590 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., pid=133, masterSystemTime=1733617300574 2024-12-08T00:21:40,591 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:40,591 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
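Once the region is opened (next sequenceid=2, post open deploy tasks above), the assignment becomes visible to clients. A small sketch that waits for the table and prints where its single region landed, assuming the same 2.x client as before; the printed server would be whatever the cluster reports, e.g. 017dd09fb407,36703 in this run:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(table)) {
          while (!admin.isTableAvailable(table)) {   // roughly what the client-side table future polls for
            Thread.sleep(100);
          }
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }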
2024-12-08T00:21:40,591 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=cc3ed2949e0e40ebaa106781844b31d7, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:21:40,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-08T00:21:40,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; OpenRegionProcedure cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 in 169 msec 2024-12-08T00:21:40,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-08T00:21:40,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, ASSIGN in 323 msec 2024-12-08T00:21:40,595 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:21:40,595 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617300595"}]},"ts":"1733617300595"} 2024-12-08T00:21:40,596 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:21:40,598 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:21:40,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-12-08T00:21:41,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-08T00:21:41,556 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 131 completed 2024-12-08T00:21:41,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-12-08T00:21:41,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,561 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,562 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,563 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:21:41,564 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40544, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:21:41,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-12-08T00:21:41,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,568 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-12-08T00:21:41,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-08T00:21:41,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,575 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-08T00:21:41,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-08T00:21:41,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,581 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-12-08T00:21:41,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,584 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-12-08T00:21:41,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,587 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-12-08T00:21:41,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,590 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-08T00:21:41,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-12-08T00:21:41,598 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:21:41,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:41,602 DEBUG [hconnection-0x2d56e567-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-08T00:21:41,603 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
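At 00:21:41,602 the client asks the master to flush TestAcidGuarantees, which is stored as FlushTableProcedure pid=134, while the worker connections opened just before keep writing rows such as test_row_0 with qualifier col10 into families A, B and C (visible in the HFileWriterImpl lines further down). A condensed sketch of that write-then-flush pattern; the row and qualifier names are taken from the log, the value payload is a placeholder:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlushSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("test_row_0"));   // row key seen in the flush output below
          byte[] value = Bytes.toBytes("value");            // placeholder payload
          for (String family : new String[] { "A", "B", "C" }) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put);
          admin.flush(name);   // asks the master for a FlushTableProcedure, as with pid=134 above
        }
      }
    }

The RegionTooBusyException warnings that follow are back-pressure rather than failures: with the 128 KB flush size and the default block multiplier of 4, the blocking memstore limit works out to the 512.0 K reported in the exceptions, and the client simply retries once the flush drains the memstore.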
2024-12-08T00:21:41,604 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:41,604 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:41,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:41,608 DEBUG [hconnection-0x5512c8e1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,609 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,616 DEBUG [hconnection-0xcad36d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,616 DEBUG [hconnection-0x5eb11946-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,616 DEBUG [hconnection-0x11d83f6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,617 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,617 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,617 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,617 DEBUG [hconnection-0x4cd83062-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,618 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,621 DEBUG [hconnection-0x4251a1ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,622 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:41,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:21:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:41,625 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:41,632 DEBUG [hconnection-0x6a5043a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,633 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617361633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617361634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617361634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617361635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,639 DEBUG [hconnection-0xf1b3abb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,639 DEBUG [hconnection-0x5f15c255-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:21:41,640 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,640 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:21:41,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617361641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e74ac10d34343b885a947fd1373d67f is 50, key is test_row_0/A:col10/1733617301621/Put/seqid=0 2024-12-08T00:21:41,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742372_1548 (size=16681) 2024-12-08T00:21:41,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e74ac10d34343b885a947fd1373d67f 2024-12-08T00:21:41,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c7a6bbb984204ae8bfab7d8976062ce0 is 50, key is test_row_0/B:col10/1733617301621/Put/seqid=0 2024-12-08T00:21:41,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T00:21:41,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742373_1549 (size=12001) 2024-12-08T00:21:41,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c7a6bbb984204ae8bfab7d8976062ce0 2024-12-08T00:21:41,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/72f959ce6eb9417d92e96a0a764212af is 50, key is test_row_0/C:col10/1733617301621/Put/seqid=0 2024-12-08T00:21:41,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617361735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617361736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617361736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617361736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617361742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742374_1550 (size=12001) 2024-12-08T00:21:41,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T00:21:41,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:41,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:41,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:41,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:41,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:41,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:41,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T00:21:41,911 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T00:21:41,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:41,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:41,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:41,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:41,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:41,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617361939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617361939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617361939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617361940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:41,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617361944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,064 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T00:21:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:42,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:42,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:42,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/72f959ce6eb9417d92e96a0a764212af 2024-12-08T00:21:42,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e74ac10d34343b885a947fd1373d67f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f 2024-12-08T00:21:42,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f, entries=250, sequenceid=15, filesize=16.3 K 2024-12-08T00:21:42,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c7a6bbb984204ae8bfab7d8976062ce0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0 2024-12-08T00:21:42,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T00:21:42,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/72f959ce6eb9417d92e96a0a764212af as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af 2024-12-08T00:21:42,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T00:21:42,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for cc3ed2949e0e40ebaa106781844b31d7 in 537ms, sequenceid=15, compaction requested=false 2024-12-08T00:21:42,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=134 2024-12-08T00:21:42,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T00:21:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:42,219 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:42,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6841e0a9c427495481f7a3780c1d5348 is 50, key is test_row_0/A:col10/1733617301632/Put/seqid=0 2024-12-08T00:21:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742375_1551 (size=12001) 2024-12-08T00:21:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:42,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:42,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617362250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617362251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617362252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617362253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617362254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617362355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617362355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617362357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617362357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617362358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617362560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617362561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617362561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617362562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617362563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,628 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6841e0a9c427495481f7a3780c1d5348 2024-12-08T00:21:42,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d93a9d374ca94c33a31ab37cfde4543a is 50, key is test_row_0/B:col10/1733617301632/Put/seqid=0 2024-12-08T00:21:42,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742376_1552 (size=12001) 2024-12-08T00:21:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T00:21:42,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617362865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617362867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617362867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617362868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:42,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617362868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,040 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d93a9d374ca94c33a31ab37cfde4543a 2024-12-08T00:21:43,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c6504c608b87439e94d2d4cee64a0b2d is 50, key is test_row_0/C:col10/1733617301632/Put/seqid=0 2024-12-08T00:21:43,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742377_1553 (size=12001) 2024-12-08T00:21:43,055 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c6504c608b87439e94d2d4cee64a0b2d 2024-12-08T00:21:43,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6841e0a9c427495481f7a3780c1d5348 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348 2024-12-08T00:21:43,063 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348, entries=150, sequenceid=38, filesize=11.7 K 2024-12-08T00:21:43,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d93a9d374ca94c33a31ab37cfde4543a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a 2024-12-08T00:21:43,067 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a, entries=150, sequenceid=38, filesize=11.7 K 2024-12-08T00:21:43,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c6504c608b87439e94d2d4cee64a0b2d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d 2024-12-08T00:21:43,071 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d, entries=150, sequenceid=38, filesize=11.7 K 2024-12-08T00:21:43,072 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for cc3ed2949e0e40ebaa106781844b31d7 in 854ms, sequenceid=38, compaction requested=false 2024-12-08T00:21:43,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:43,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
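The burst of RegionTooBusyException entries above comes from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (reported as 512.0 K here, presumably a deliberately small test setting); writes succeed again once the flush that follows drains the memstore. The Java sketch below is a minimal, hypothetical illustration of how a caller using the standard HBase client API might back off and retry such writes. It is not code from TestAcidGuarantees; the class name, helper, and retry parameters are invented for illustration, and in practice the HBase client also retries internally according to hbase.client.retries.number.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  // Hypothetical helper: retry a single Put with exponential backoff when the
  // region rejects it because its memstore is above the blocking limit.
  static void putWithRetry(Table table, Put put, int maxAttempts) throws Exception {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                                  // give up after maxAttempts
        }
        Thread.sleep(backoffMs);                    // wait for the flush to drain the memstore
        backoffMs = Math.min(backoffMs * 2, 5_000); // cap the backoff at 5 seconds
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Column family "A" and the row/column names mirror those seen in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithRetry(table, put, 10);
    }
  }
}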
2024-12-08T00:21:43,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-08T00:21:43,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-08T00:21:43,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-08T00:21:43,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4690 sec 2024-12-08T00:21:43,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.4730 sec 2024-12-08T00:21:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:43,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:43,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/d2d8c645962a4086a880d88fc59f9871 is 50, key is test_row_0/A:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742378_1554 (size=14341) 2024-12-08T00:21:43,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/d2d8c645962a4086a880d88fc59f9871 2024-12-08T00:21:43,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/6d8f3d193fc04814989cd6bbea9308af is 50, key is test_row_0/B:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to 
blk_1073742379_1555 (size=12001) 2024-12-08T00:21:43,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/6d8f3d193fc04814989cd6bbea9308af 2024-12-08T00:21:43,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6d960b888ad74a1f87b71144a83630ea is 50, key is test_row_0/C:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742380_1556 (size=12001) 2024-12-08T00:21:43,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617363397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6d960b888ad74a1f87b71144a83630ea 2024-12-08T00:21:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/d2d8c645962a4086a880d88fc59f9871 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871 2024-12-08T00:21:43,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617363398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617363399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871, entries=200, sequenceid=52, filesize=14.0 K 2024-12-08T00:21:43,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/6d8f3d193fc04814989cd6bbea9308af as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af 2024-12-08T00:21:43,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617363402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617363403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T00:21:43,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6d960b888ad74a1f87b71144a83630ea as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea 2024-12-08T00:21:43,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T00:21:43,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for cc3ed2949e0e40ebaa106781844b31d7 in 45ms, sequenceid=52, compaction requested=true 2024-12-08T00:21:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:43,417 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:43,417 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 
2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:43,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:43,419 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:43,419 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:43,419 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:43,419 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=42.0 K 2024-12-08T00:21:43,420 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:43,420 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:43,420 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:43,420 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.2 K 2024-12-08T00:21:43,420 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e74ac10d34343b885a947fd1373d67f, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617301621 2024-12-08T00:21:43,420 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c7a6bbb984204ae8bfab7d8976062ce0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617301621 2024-12-08T00:21:43,421 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d93a9d374ca94c33a31ab37cfde4543a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733617301632 2024-12-08T00:21:43,421 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6841e0a9c427495481f7a3780c1d5348, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733617301632 2024-12-08T00:21:43,421 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d8f3d193fc04814989cd6bbea9308af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:43,421 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2d8c645962a4086a880d88fc59f9871, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:43,436 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:43,437 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/89cfa2deff004ca1875dced7411e4da4 is 50, key is test_row_0/B:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,447 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:43,447 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/9ec1f570bc8148c79d6d13f593983431 is 50, key is test_row_0/A:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742381_1557 (size=12104) 2024-12-08T00:21:43,458 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/89cfa2deff004ca1875dced7411e4da4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/89cfa2deff004ca1875dced7411e4da4 2024-12-08T00:21:43,462 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into 89cfa2deff004ca1875dced7411e4da4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:43,462 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:43,462 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617303417; duration=0sec 2024-12-08T00:21:43,462 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:43,462 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:43,462 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:43,463 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:43,463 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:43,463 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:43,463 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.2 K 2024-12-08T00:21:43,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 72f959ce6eb9417d92e96a0a764212af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617301621 2024-12-08T00:21:43,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c6504c608b87439e94d2d4cee64a0b2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733617301632 2024-12-08T00:21:43,464 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d960b888ad74a1f87b71144a83630ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:43,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742382_1558 (size=12104) 2024-12-08T00:21:43,479 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:43,479 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/253a4cbfc8614e619dcf355c934048af is 50, key is test_row_0/C:col10/1733617302252/Put/seqid=0 2024-12-08T00:21:43,485 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/9ec1f570bc8148c79d6d13f593983431 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/9ec1f570bc8148c79d6d13f593983431 2024-12-08T00:21:43,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742383_1559 (size=12104) 2024-12-08T00:21:43,491 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 9ec1f570bc8148c79d6d13f593983431(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:43,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:43,491 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617303417; duration=0sec 2024-12-08T00:21:43,491 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:43,492 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:43,495 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/253a4cbfc8614e619dcf355c934048af as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/253a4cbfc8614e619dcf355c934048af 2024-12-08T00:21:43,500 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 253a4cbfc8614e619dcf355c934048af(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
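For context on the three store compactions above (B, A and C of region cc3ed2949e0e40ebaa106781844b31d7, each rewriting three ~11.7-12 K HFiles, ≈35.2 K of input, into a single ~11.8 K file): these are scheduled automatically by the region server's CompactSplit threads, but a compaction of the same table can also be requested explicitly through the HBase Admin API. The sketch below is illustrative only; it is not taken from this test run and assumes nothing beyond a reachable cluster configuration (hbase-site.xml) on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the target cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask for a minor compaction of every region/store of the table;
            // the region servers still decide which files to pick, just like
            // the ExploringCompactionPolicy selections logged above.
            admin.compact(table);
            // A major compaction (rewrite all files in each store) can be
            // requested with admin.majorCompact(table) instead.
        }
    }
}
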
2024-12-08T00:21:43,500 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:43,500 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617303417; duration=0sec 2024-12-08T00:21:43,500 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:43,500 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:43,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:43,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:43,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:43,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/edb3ae3180e640a08a7ee5b3d67ba08a is 50, key is test_row_0/A:col10/1733617303401/Put/seqid=0 2024-12-08T00:21:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742384_1560 (size=12001) 2024-12-08T00:21:43,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/edb3ae3180e640a08a7ee5b3d67ba08a 2024-12-08T00:21:43,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/25b11440b897458baabf93b199b3968b is 50, key is test_row_0/B:col10/1733617303401/Put/seqid=0 2024-12-08T00:21:43,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617363517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617363518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617363522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742385_1561 (size=12001) 2024-12-08T00:21:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617363524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617363525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617363626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617363626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617363626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617363630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617363630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T00:21:43,708 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-08T00:21:43,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:43,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-08T00:21:43,711 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:43,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T00:21:43,711 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:43,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:43,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T00:21:43,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617363829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617363829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617363830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617363836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:43,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617363837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:43,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T00:21:43,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
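The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K" for region cc3ed2949e0e40ebaa106781844b31d7) are returned to the writers while the region's memstore is above its blocking size and the in-flight flush has not yet drained it. The sketch below shows one way application code might retry a single Put with bounded back-off when that exception surfaces; it is purely illustrative. The table name, column family 'A', qualifier 'col10' and row key 'test_row_0' mirror the test data in the log, while the connection setup, back-off values and attempt limit are assumptions. Note that the stock client normally retries RegionTooBusyException internally and may surface it wrapped in a retries-exhausted exception rather than directly, so an explicit loop like this only matters when those retries are tuned down or exhausted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            long backoffMs = 100L; // illustrative starting back-off
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore blocking limit; wait for the
                    // in-flight flush to drain it and try again, up to a bound.
                    if (++attempts >= 5) {
                        throw busy;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
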
2024-12-08T00:21:43,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:43,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:43,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:43,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:43,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
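The flush sequence above shows the master-driven path: the client's flush request is stored as FlushTableProcedure pid=136, which fans out a FlushRegionProcedure (pid=137) to the region server; the first pid=137 attempt fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing its own ~134 KB snapshot, and the same callable is re-dispatched a moment later and proceeds with the flush (see the pid=137 execution at 00:21:44 below). For reference, the client-side call that drives this is a single Admin.flush; a minimal sketch, assuming only a reachable cluster configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a flush of the table on the master, which runs it as a
            // FlushTableProcedure with one FlushRegionProcedure per region,
            // as in the pid=136/137 entries above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
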
2024-12-08T00:21:43,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/25b11440b897458baabf93b199b3968b 2024-12-08T00:21:43,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/cec338acf293420a9d5a55595d2e673b is 50, key is test_row_0/C:col10/1733617303401/Put/seqid=0 2024-12-08T00:21:43,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742386_1562 (size=12001) 2024-12-08T00:21:43,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/cec338acf293420a9d5a55595d2e673b 2024-12-08T00:21:43,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/edb3ae3180e640a08a7ee5b3d67ba08a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a 2024-12-08T00:21:43,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:21:43,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/25b11440b897458baabf93b199b3968b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b 2024-12-08T00:21:43,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:21:43,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/cec338acf293420a9d5a55595d2e673b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b 2024-12-08T00:21:43,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T00:21:43,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for cc3ed2949e0e40ebaa106781844b31d7 in 459ms, sequenceid=78, compaction requested=false 2024-12-08T00:21:43,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:44,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T00:21:44,016 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T00:21:44,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:44,016 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:21:44,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:44,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:44,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:44,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7fbd756d75a54744a37657d58814220f is 50, key is test_row_0/A:col10/1733617303523/Put/seqid=0 2024-12-08T00:21:44,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to 
blk_1073742387_1563 (size=12001) 2024-12-08T00:21:44,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:44,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:44,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617364164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617364169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617364170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617364170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617364170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617364271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617364271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617364279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617364279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617364279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T00:21:44,424 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7fbd756d75a54744a37657d58814220f 2024-12-08T00:21:44,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/4eeffd78402941be8e55203a2a9075d9 is 50, key is test_row_0/B:col10/1733617303523/Put/seqid=0 2024-12-08T00:21:44,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742388_1564 (size=12001) 2024-12-08T00:21:44,438 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/4eeffd78402941be8e55203a2a9075d9 2024-12-08T00:21:44,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1328c28560be4450b74b402482cbd3df is 50, key is test_row_0/C:col10/1733617303523/Put/seqid=0 2024-12-08T00:21:44,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742389_1565 (size=12001) 2024-12-08T00:21:44,462 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1328c28560be4450b74b402482cbd3df 2024-12-08T00:21:44,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7fbd756d75a54744a37657d58814220f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f 2024-12-08T00:21:44,472 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:21:44,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/4eeffd78402941be8e55203a2a9075d9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9 2024-12-08T00:21:44,479 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:21:44,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1328c28560be4450b74b402482cbd3df as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df 2024-12-08T00:21:44,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617364478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617364478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,488 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df, entries=150, sequenceid=91, filesize=11.7 K 2024-12-08T00:21:44,489 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for cc3ed2949e0e40ebaa106781844b31d7 in 473ms, sequenceid=91, compaction requested=true 2024-12-08T00:21:44,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:44,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:44,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-08T00:21:44,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-08T00:21:44,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:44,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:44,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:44,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-08T00:21:44,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 779 msec 2024-12-08T00:21:44,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 784 msec 2024-12-08T00:21:44,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e861d123bc834f809b629517e4838f63 is 50, key is test_row_0/A:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:44,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617364505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617364506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617364514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742390_1566 (size=16681) 2024-12-08T00:21:44,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e861d123bc834f809b629517e4838f63 2024-12-08T00:21:44,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/24f6670016b34f3cb1d01a0bca01c514 is 50, key is test_row_0/B:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:44,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742391_1567 (size=12001) 2024-12-08T00:21:44,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/24f6670016b34f3cb1d01a0bca01c514 2024-12-08T00:21:44,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2321f534a4348e48055d87122c8a4eb is 50, key is test_row_0/C:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:44,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742392_1568 (size=12001) 2024-12-08T00:21:44,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617364615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617364616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617364617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,633 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:21:44,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617364785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617364785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T00:21:44,814 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-08T00:21:44,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:44,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-08T00:21:44,817 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:44,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T00:21:44,818 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:44,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:44,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617364820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617364829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617364829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T00:21:44,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2321f534a4348e48055d87122c8a4eb 2024-12-08T00:21:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e861d123bc834f809b629517e4838f63 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63 2024-12-08T00:21:44,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63, entries=250, sequenceid=117, filesize=16.3 K 2024-12-08T00:21:44,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/24f6670016b34f3cb1d01a0bca01c514 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514 2024-12-08T00:21:44,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:44,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514, entries=150, sequenceid=117, filesize=11.7 K 2024-12-08T00:21:44,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-08T00:21:44,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2321f534a4348e48055d87122c8a4eb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb 2024-12-08T00:21:44,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:44,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:44,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:44,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:44,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:44,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
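The pid=139 failure above is not a data error: the master's FlushTableProcedure fanned out a FlushRegionProcedure while the region was already flushing ("NOT flushing ... as already flushing"), so the callable throws IOException and the master records the remote procedure as failed, after which the procedure framework can retry it. In this build the whole exchange starts from an ordinary client flush request; a hedged sketch of that call follows -- only the table name comes from the log, the rest is generic client boilerplate.

```java
// Minimal sketch of the client-side call that drives the flush procedures
// seen above (pid=136/138). Not the test's actual code.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // In this build the request surfaces on the master as the
      // FlushTableProcedure logged above; if the region is already flushing,
      // the per-region subprocedure fails with "Unable to complete flush"
      // and is reported back exactly as in the preceding log entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```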
2024-12-08T00:21:44,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb, entries=150, sequenceid=117, filesize=11.7 K 2024-12-08T00:21:44,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cc3ed2949e0e40ebaa106781844b31d7 in 486ms, sequenceid=117, compaction requested=true 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:44,977 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:44,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:44,977 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:44,979 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:44,979 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:44,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
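The flush above leaves four store files in each of the A/B/C column families, at which point CompactSplit queues minor compactions and ExploringCompactionPolicy selects all four files ("4 eligible, 16 blocking"). Purely as a hedged illustration of the knobs behind that trigger — the values below are the stock HBase defaults and an assumption about this test cluster, not something read out of this log — the relevant thresholds are set through the ordinary Configuration API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static void main(String[] args) {
            // Start from the default HBase configuration on the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Minimum number of eligible store files before a minor compaction
            // is considered (the selections above fire with 4 files).
            conf.setInt("hbase.hstore.compactionThreshold", 3);

            // Upper bound on how many files one minor compaction may pick up.
            conf.setInt("hbase.hstore.compaction.max", 10);

            // Store-file count that blocks further flushes/updates
            // (the "16 blocking" figure in the selection messages).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);

            System.out.println("compactionThreshold = "
                + conf.getInt("hbase.hstore.compactionThreshold", -1));
        }
    }

With compactionThreshold at its default of 3, the fourth flushed file is what makes each store eligible here, and blockingStoreFiles=16 matches the "16 blocking" figure in the selection log.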
2024-12-08T00:21:44,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/89cfa2deff004ca1875dced7411e4da4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=47.0 K 2024-12-08T00:21:44,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:44,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:44,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:44,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/9ec1f570bc8148c79d6d13f593983431, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=51.5 K 2024-12-08T00:21:44,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 89cfa2deff004ca1875dced7411e4da4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:44,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ec1f570bc8148c79d6d13f593983431, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:44,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 25b11440b897458baabf93b199b3968b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, 
earliestPutTs=1733617303398 2024-12-08T00:21:44,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting edb3ae3180e640a08a7ee5b3d67ba08a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617303398 2024-12-08T00:21:44,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4eeffd78402941be8e55203a2a9075d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617303514 2024-12-08T00:21:44,981 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fbd756d75a54744a37657d58814220f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617303514 2024-12-08T00:21:44,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 24f6670016b34f3cb1d01a0bca01c514, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:44,981 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e861d123bc834f809b629517e4838f63, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:44,992 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#473 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:44,993 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a8493ab76b19410182fc995fe393ba93 is 50, key is test_row_0/A:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:44,995 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:44,997 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c8f31cf51983415bab832755f06eb4c9 is 50, key is test_row_0/B:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:45,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742393_1569 (size=12241) 2024-12-08T00:21:45,007 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a8493ab76b19410182fc995fe393ba93 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a8493ab76b19410182fc995fe393ba93 2024-12-08T00:21:45,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into a8493ab76b19410182fc995fe393ba93(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:45,012 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:45,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=12, startTime=1733617304977; duration=0sec 2024-12-08T00:21:45,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:45,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:45,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:45,015 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:45,015 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:45,015 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:45,015 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/253a4cbfc8614e619dcf355c934048af, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=47.0 K 2024-12-08T00:21:45,015 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 253a4cbfc8614e619dcf355c934048af, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733617302251 2024-12-08T00:21:45,016 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting cec338acf293420a9d5a55595d2e673b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733617303398 2024-12-08T00:21:45,017 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1328c28560be4450b74b402482cbd3df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733617303514 2024-12-08T00:21:45,017 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2321f534a4348e48055d87122c8a4eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:45,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742394_1570 (size=12241) 2024-12-08T00:21:45,033 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c8f31cf51983415bab832755f06eb4c9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c8f31cf51983415bab832755f06eb4c9 2024-12-08T00:21:45,035 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:45,036 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/59acd7f495bc48b8ae7d9ff0fec560da is 50, key is test_row_0/C:col10/1733617304168/Put/seqid=0 2024-12-08T00:21:45,039 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into c8f31cf51983415bab832755f06eb4c9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:45,039 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:45,039 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=12, startTime=1733617304977; duration=0sec 2024-12-08T00:21:45,039 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:45,039 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:45,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742395_1571 (size=12241) 2024-12-08T00:21:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T00:21:45,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-08T00:21:45,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:45,125 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:45,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:45,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
as already flushing 2024-12-08T00:21:45,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e932e61f9e2425ea6c4ae2249f2b84a is 50, key is test_row_0/A:col10/1733617304513/Put/seqid=0 2024-12-08T00:21:45,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742396_1572 (size=12001) 2024-12-08T00:21:45,136 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e932e61f9e2425ea6c4ae2249f2b84a 2024-12-08T00:21:45,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/41bd9df4308148a1ba2a8b4bb21157e5 is 50, key is test_row_0/B:col10/1733617304513/Put/seqid=0 2024-12-08T00:21:45,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742397_1573 (size=12001) 2024-12-08T00:21:45,162 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/41bd9df4308148a1ba2a8b4bb21157e5 2024-12-08T00:21:45,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/7182fabf4980437ba10f29bab95911a5 is 50, key is test_row_0/C:col10/1733617304513/Put/seqid=0 2024-12-08T00:21:45,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742398_1574 (size=12001) 2024-12-08T00:21:45,192 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/7182fabf4980437ba10f29bab95911a5 2024-12-08T00:21:45,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6e932e61f9e2425ea6c4ae2249f2b84a as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a 2024-12-08T00:21:45,199 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a, entries=150, sequenceid=129, filesize=11.7 K 2024-12-08T00:21:45,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/41bd9df4308148a1ba2a8b4bb21157e5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5 2024-12-08T00:21:45,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,204 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5, entries=150, sequenceid=129, filesize=11.7 K 2024-12-08T00:21:45,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617365195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617365197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/7182fabf4980437ba10f29bab95911a5 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5 2024-12-08T00:21:45,208 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5, entries=150, sequenceid=129, filesize=11.7 K 2024-12-08T00:21:45,209 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for cc3ed2949e0e40ebaa106781844b31d7 in 84ms, sequenceid=129, compaction requested=false 2024-12-08T00:21:45,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:45,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
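The RegionTooBusyException warnings above are the region server's memstore back-pressure: once cc3ed2949e0e40ebaa106781844b31d7 is over its 512.0 K memstore limit, incoming Mutate calls are rejected until the in-flight flush drains it. The stock HBase client retries these internally; the sketch below only shows what a manual caller would roughly do. The table name, row key, and A/col10 coordinates come from the log; the class name, payload, retry count, and backoff are illustrative assumptions, and depending on client retry settings the exception may instead surface wrapped in a RetriesExhaustedException.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        // Rejected with RegionTooBusyException while the region
                        // is over its memstore limit and still flushing.
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException busy) {
                        if (attempt >= 5) throw busy;   // give up after a few tries
                        Thread.sleep(backoffMs);        // let the flush drain the memstore
                        backoffMs *= 2;
                    }
                }
            }
        }
    }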
2024-12-08T00:21:45,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-08T00:21:45,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:45,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:45,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:45,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-08T00:21:45,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-08T00:21:45,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 396 msec 2024-12-08T00:21:45,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 402 msec 2024-12-08T00:21:45,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6f6f03b235a0492abf688d6d4f73e524 is 50, key is test_row_0/A:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:45,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742399_1575 (size=12151) 2024-12-08T00:21:45,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617365251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617365291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617365292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617365305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617365306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617365355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T00:21:45,420 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-08T00:21:45,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:45,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-08T00:21:45,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:45,422 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:45,423 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:45,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:45,446 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/59acd7f495bc48b8ae7d9ff0fec560da as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/59acd7f495bc48b8ae7d9ff0fec560da 2024-12-08T00:21:45,450 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 59acd7f495bc48b8ae7d9ff0fec560da(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
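The pid=138/140 entries are client-requested table flushes: the master turns the jenkins client's flush call into a FlushTableProcedure with a FlushRegionProcedure subprocedure per region, and HBaseAdmin polls "Checking to see if procedure is done" until it finishes. A minimal sketch of the client side, assuming a reachable cluster and using the public Admin API (the class name and connection setup are illustrative; the table name is the one in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Submits a flush-table procedure on the master (the
                // "Client=... flush TestAcidGuarantees" entries above)
                // and waits for it to complete.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

Admin.flush(TableName) waits on the procedure, which is consistent with the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed" line appearing only after the region-level flush attempts settle.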
2024-12-08T00:21:45,450 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:45,450 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=12, startTime=1733617304977; duration=0sec 2024-12-08T00:21:45,450 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:45,450 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:45,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617365512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617365512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:45,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617365559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T00:21:45,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:45,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:45,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6f6f03b235a0492abf688d6d4f73e524 2024-12-08T00:21:45,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/600c6e2adbcc4086b7306b1a2898dc8c is 50, key is test_row_0/B:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:45,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742400_1576 (size=12151) 2024-12-08T00:21:45,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/600c6e2adbcc4086b7306b1a2898dc8c 2024-12-08T00:21:45,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/268ba11eff7640858c71282975400036 is 50, key is test_row_0/C:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:45,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742401_1577 (size=12151) 2024-12-08T00:21:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:45,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T00:21:45,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:45,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:45,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:45,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617365817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617365818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617365865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:45,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T00:21:45,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:45,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:46,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T00:21:46,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:46,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:46,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:46,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:46,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:46,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:46,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/268ba11eff7640858c71282975400036 2024-12-08T00:21:46,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/6f6f03b235a0492abf688d6d4f73e524 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524 2024-12-08T00:21:46,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T00:21:46,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/600c6e2adbcc4086b7306b1a2898dc8c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c 2024-12-08T00:21:46,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T00:21:46,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/268ba11eff7640858c71282975400036 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036 2024-12-08T00:21:46,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T00:21:46,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for cc3ed2949e0e40ebaa106781844b31d7 in 860ms, sequenceid=156, compaction requested=true 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:46,071 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:46,071 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:46,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:46,072 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:46,072 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:46,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:46,072 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:46,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:46,072 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a8493ab76b19410182fc995fe393ba93, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.5 K 2024-12-08T00:21:46,072 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:46,072 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c8f31cf51983415bab832755f06eb4c9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.5 K 2024-12-08T00:21:46,073 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8493ab76b19410182fc995fe393ba93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:46,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c8f31cf51983415bab832755f06eb4c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:46,073 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e932e61f9e2425ea6c4ae2249f2b84a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733617304497 2024-12-08T00:21:46,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 41bd9df4308148a1ba2a8b4bb21157e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733617304497 2024-12-08T00:21:46,073 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f6f03b235a0492abf688d6d4f73e524, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:46,074 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 600c6e2adbcc4086b7306b1a2898dc8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:46,080 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:46,081 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3dd7cd84b6e3425082d67af99c664c07 is 50, key is test_row_0/A:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:46,082 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#483 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:46,083 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/dceed96ee6e14023b11b68dd742eb401 is 50, key is test_row_0/B:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:46,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742402_1578 (size=12493) 2024-12-08T00:21:46,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742403_1579 (size=12493) 2024-12-08T00:21:46,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T00:21:46,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:46,186 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-08T00:21:46,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:46,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:46,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:46,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:46,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:46,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:46,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/29e2304293394a4393576669541e3170 is 50, key is test_row_0/A:col10/1733617305222/Put/seqid=0 2024-12-08T00:21:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742404_1580 (size=12151) 2024-12-08T00:21:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:46,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:46,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617366364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617366366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617366370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617366370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617366371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617366473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617366475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617366477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617366477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,502 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3dd7cd84b6e3425082d67af99c664c07 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3dd7cd84b6e3425082d67af99c664c07 2024-12-08T00:21:46,505 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/dceed96ee6e14023b11b68dd742eb401 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/dceed96ee6e14023b11b68dd742eb401 2024-12-08T00:21:46,507 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 3dd7cd84b6e3425082d67af99c664c07(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:46,507 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:46,507 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617306071; duration=0sec 2024-12-08T00:21:46,507 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:46,507 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:46,507 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:46,511 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:46,511 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:46,511 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:46,511 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/59acd7f495bc48b8ae7d9ff0fec560da, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.5 K 2024-12-08T00:21:46,512 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59acd7f495bc48b8ae7d9ff0fec560da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733617304168 2024-12-08T00:21:46,512 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7182fabf4980437ba10f29bab95911a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733617304497 2024-12-08T00:21:46,512 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 268ba11eff7640858c71282975400036, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:46,514 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into dceed96ee6e14023b11b68dd742eb401(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:46,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:46,514 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617306071; duration=0sec 2024-12-08T00:21:46,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:46,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:46,518 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#485 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:46,519 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/df8c88d68f394d50a500b55dc67ff903 is 50, key is test_row_0/C:col10/1733617305199/Put/seqid=0 2024-12-08T00:21:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742405_1581 (size=12493) 2024-12-08T00:21:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:46,595 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/29e2304293394a4393576669541e3170 2024-12-08T00:21:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/86b83687f5cf4fa1bfe6268ad55f915f is 50, key is test_row_0/B:col10/1733617305222/Put/seqid=0 2024-12-08T00:21:46,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742406_1582 (size=12151) 2024-12-08T00:21:46,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617366682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617366683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617366683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617366684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,927 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/df8c88d68f394d50a500b55dc67ff903 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/df8c88d68f394d50a500b55dc67ff903 2024-12-08T00:21:46,931 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into df8c88d68f394d50a500b55dc67ff903(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:46,931 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:46,931 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617306071; duration=0sec 2024-12-08T00:21:46,931 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:46,931 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:46,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617366985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617366987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:46,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617366988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:46,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617366987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,011 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/86b83687f5cf4fa1bfe6268ad55f915f 2024-12-08T00:21:47,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/28600b4e0e4649f5ba012303553b5881 is 50, key is test_row_0/C:col10/1733617305222/Put/seqid=0 2024-12-08T00:21:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742407_1583 (size=12151) 2024-12-08T00:21:47,021 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/28600b4e0e4649f5ba012303553b5881 2024-12-08T00:21:47,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/29e2304293394a4393576669541e3170 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170 2024-12-08T00:21:47,027 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:21:47,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/86b83687f5cf4fa1bfe6268ad55f915f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f 2024-12-08T00:21:47,032 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:21:47,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/28600b4e0e4649f5ba012303553b5881 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881 2024-12-08T00:21:47,036 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881, entries=150, sequenceid=166, filesize=11.9 K 2024-12-08T00:21:47,037 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for cc3ed2949e0e40ebaa106781844b31d7 in 851ms, sequenceid=166, compaction requested=false 2024-12-08T00:21:47,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:47,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
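Annotation: the flush completing here follows the usual two-step pattern visible in the paths above: each store writes its memstore snapshot to a file under the region's .tmp/<family> directory, then commits (renames) it into the family directory before the "Added ... entries=150, sequenceid=166" bookkeeping and the final "Finished flush" summary. Flushes like this one are driven by the test's explicit flush requests against the table (the FlushTableProcedure/FlushRegionProcedure pids in the surrounding lines). A minimal sketch of issuing such a request through the public Admin API follows; the connection setup and class name are assumptions, and Admin.flush is shown only because it matches the "Client=jenkins ... flush TestAcidGuarantees" requests in the log, not because it is the test's exact code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush the table; each region writes its memstore
                // snapshot under .tmp/<family> and commits the new HFile into the store.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }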
2024-12-08T00:21:47,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-08T00:21:47,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-08T00:21:47,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-08T00:21:47,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6150 sec 2024-12-08T00:21:47,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.6180 sec 2024-12-08T00:21:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:47,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-08T00:21:47,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:47,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:47,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:47,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:47,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:47,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:47,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3a271de2808d4e17984938684046a6aa is 50, key is test_row_0/A:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:47,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742408_1584 (size=14541) 2024-12-08T00:21:47,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617367406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617367491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617367494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617367495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617367495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617367512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T00:21:47,527 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-08T00:21:47,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:47,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-08T00:21:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:47,529 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:47,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:47,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:47,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:47,681 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T00:21:47,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:47,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:47,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:47,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
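Annotation: the ERROR and "Failed to complete execution of pid=143" entries above are benign. The region is still busy with the flush started a moment earlier, so FlushRegionCallable logs "NOT flushing ... as already flushing" and fails the remote call; the master then re-dispatches pid=143 (the repeated "Executing remote procedure ... pid=143" lines that follow) until the in-progress flush finishes, just as pid=140 completed earlier. The condition is easy to provoke with overlapping flush requests; the sketch below is an illustrative assumption about how such overlap can arise, not the test's actual code, and the class name, thread pool, and timeouts are invented for the example.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class OverlappingFlushExample {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
                ExecutorService pool = Executors.newFixedThreadPool(2);
                for (int i = 0; i < 2; i++) {
                    // Two near-simultaneous flush requests for the same table: the later
                    // one's FlushRegionCallable hits "already flushing", the master keeps
                    // re-dispatching it, and both requests eventually complete.
                    pool.submit(() -> {
                        try (Admin admin = conn.getAdmin()) {
                            admin.flush(table);
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    });
                }
                pool.shutdown();
                pool.awaitTermination(5, TimeUnit.MINUTES);
            }
        }
    }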
2024-12-08T00:21:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:47,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617367717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3a271de2808d4e17984938684046a6aa 2024-12-08T00:21:47,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ab48a7f7b21446b1918c41c7f67e7cd7 is 50, key is test_row_0/B:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:47,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742409_1585 (size=12151) 2024-12-08T00:21:47,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:47,834 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T00:21:47,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:47,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:47,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:47,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,987 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:47,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T00:21:47,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:47,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:47,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:47,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:47,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:48,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617368020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:48,139 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T00:21:48,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:48,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:48,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:48,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
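The repeated pid=143 failures above show the master's flush procedure racing with the background flush: FlushRegionCallable gives up with "Unable to complete flush ... as already flushing", the region server reports the failure, and the master re-dispatches the procedure until the in-flight flush finishes. A table-level flush of this kind can be requested through the Admin API; the sketch below is hypothetical and only illustrates the client call behind such a procedure, not code from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master (compare pid=142/143 above);
      // the region-side callable fails fast if the region is already flushing,
      // and the master keeps retrying until the flush can actually run.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}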
2024-12-08T00:21:48,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:48,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:48,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ab48a7f7b21446b1918c41c7f67e7cd7 2024-12-08T00:21:48,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1ba072f695804f55b907c986af702435 is 50, key is test_row_0/C:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:48,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742410_1586 (size=12151) 2024-12-08T00:21:48,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1ba072f695804f55b907c986af702435 2024-12-08T00:21:48,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3a271de2808d4e17984938684046a6aa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa 2024-12-08T00:21:48,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa, entries=200, sequenceid=197, filesize=14.2 K 2024-12-08T00:21:48,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ab48a7f7b21446b1918c41c7f67e7cd7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7 2024-12-08T00:21:48,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T00:21:48,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/1ba072f695804f55b907c986af702435 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435 2024-12-08T00:21:48,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T00:21:48,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for cc3ed2949e0e40ebaa106781844b31d7 in 850ms, sequenceid=197, compaction requested=true 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:48,231 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:48,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:48,231 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:48,232 INFO 
[RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:48,232 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:48,232 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3dd7cd84b6e3425082d67af99c664c07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=38.3 K 2024-12-08T00:21:48,232 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/dceed96ee6e14023b11b68dd742eb401, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.9 K 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dd7cd84b6e3425082d67af99c664c07, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dceed96ee6e14023b11b68dd742eb401, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29e2304293394a4393576669541e3170, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617305222 2024-12-08T00:21:48,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b83687f5cf4fa1bfe6268ad55f915f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617305222 2024-12-08T00:21:48,233 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a271de2808d4e17984938684046a6aa, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:48,233 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ab48a7f7b21446b1918c41c7f67e7cd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:48,240 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#491 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:48,240 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:48,240 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/867c171c88eb4e7ab33aa15127ee3480 is 50, key is test_row_0/A:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:48,241 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/0bb5e73814884ae7a728a281702286bd is 50, key is test_row_0/B:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:48,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742411_1587 (size=12595) 2024-12-08T00:21:48,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742412_1588 (size=12595) 2024-12-08T00:21:48,256 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/867c171c88eb4e7ab33aa15127ee3480 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/867c171c88eb4e7ab33aa15127ee3480 2024-12-08T00:21:48,260 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 867c171c88eb4e7ab33aa15127ee3480(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
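The compaction selection above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy taking all three files) is governed by the standard store-compaction settings. The keys in the sketch below are real HBase configuration properties; the values shown are the usual 2.x defaults and are listed only to make the logged figures easier to read, not to describe this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum eligible files; "3 eligible" is enough for a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files per compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ExploringCompactionPolicy's "in ratio" test
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection lines
  }
}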
2024-12-08T00:21:48,260 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:48,260 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617308231; duration=0sec 2024-12-08T00:21:48,260 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:48,260 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:48,260 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:48,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:48,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:48,261 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:48,261 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/df8c88d68f394d50a500b55dc67ff903, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=35.9 K 2024-12-08T00:21:48,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting df8c88d68f394d50a500b55dc67ff903, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733617305195 2024-12-08T00:21:48,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28600b4e0e4649f5ba012303553b5881, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733617305222 2024-12-08T00:21:48,262 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ba072f695804f55b907c986af702435, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:48,267 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#493 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:48,268 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6bfd37ba71dc4e99942b3c872998c4b1 is 50, key is test_row_0/C:col10/1733617307380/Put/seqid=0 2024-12-08T00:21:48,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742413_1589 (size=12595) 2024-12-08T00:21:48,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6bfd37ba71dc4e99942b3c872998c4b1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6bfd37ba71dc4e99942b3c872998c4b1 2024-12-08T00:21:48,291 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T00:21:48,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:48,292 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:21:48,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:48,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:48,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:48,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:48,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:48,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:48,296 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 6bfd37ba71dc4e99942b3c872998c4b1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
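The "CompactingMemStore ... FLUSHING TO DISK" and "Swapping pipeline suffix" lines above indicate the column families use an in-memory compacting memstore rather than the default memstore. The sketch below shows one way a table can opt into that behaviour; it is an assumption about how such a table might be described, not this test's actual setup, and MemoryCompactionPolicy.BASIC is only one possible policy.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactingMemStoreTable {
  public static TableDescriptor descriptor() {
    // Only family A is shown; the table in this log has families A, B and C.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // enables CompactingMemStore for this family
            .build())
        .build();
  }
}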
2024-12-08T00:21:48,296 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:48,296 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617308231; duration=0sec 2024-12-08T00:21:48,296 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:48,296 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:48,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff is 50, key is test_row_0/A:col10/1733617307386/Put/seqid=0 2024-12-08T00:21:48,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742414_1590 (size=12151) 2024-12-08T00:21:48,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:48,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:48,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617368559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617368560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617368560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617368566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617368566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:48,656 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/0bb5e73814884ae7a728a281702286bd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/0bb5e73814884ae7a728a281702286bd 2024-12-08T00:21:48,660 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into 0bb5e73814884ae7a728a281702286bd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
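The recurring "Over memstore limit=512.0 K" warnings come from the blocking limit that checkResources enforces, which is the per-region memstore flush size multiplied by the block multiplier. The sketch below uses real property names, but the concrete values are an inference (a 128 KB flush size with the default multiplier of 4 would give the 512 K limit seen here); the test's actual configuration is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush.size * block.multiplier; 128 KB * 4 = 512 K,
    // matching the RegionTooBusyException messages above (inferred values).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}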
2024-12-08T00:21:48,660 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:48,660 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617308231; duration=0sec 2024-12-08T00:21:48,660 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:48,660 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:48,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617368667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617368668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617368668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617368669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617368672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,710 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff 2024-12-08T00:21:48,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/76c2784e183349168258245561ddc22d is 50, key is test_row_0/B:col10/1733617307386/Put/seqid=0 2024-12-08T00:21:48,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742415_1591 (size=12151) 2024-12-08T00:21:48,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617368871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617368872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617368872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617368873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:48,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617368878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,121 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/76c2784e183349168258245561ddc22d 2024-12-08T00:21:49,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6f460221997942e981a11fdfa6ff542b is 50, key is test_row_0/C:col10/1733617307386/Put/seqid=0 2024-12-08T00:21:49,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742416_1592 (size=12151) 2024-12-08T00:21:49,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617369176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617369177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617369178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617369178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617369184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,532 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6f460221997942e981a11fdfa6ff542b 2024-12-08T00:21:49,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff 2024-12-08T00:21:49,539 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff, entries=150, sequenceid=206, filesize=11.9 K 2024-12-08T00:21:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/76c2784e183349168258245561ddc22d as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d 2024-12-08T00:21:49,543 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d, entries=150, sequenceid=206, filesize=11.9 K 2024-12-08T00:21:49,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/6f460221997942e981a11fdfa6ff542b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b 2024-12-08T00:21:49,547 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b, entries=150, sequenceid=206, filesize=11.9 K 2024-12-08T00:21:49,548 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for cc3ed2949e0e40ebaa106781844b31d7 in 1256ms, sequenceid=206, compaction requested=false 2024-12-08T00:21:49,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:49,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:49,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-08T00:21:49,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-08T00:21:49,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-08T00:21:49,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0190 sec 2024-12-08T00:21:49,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.0220 sec 2024-12-08T00:21:49,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T00:21:49,633 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-08T00:21:49,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:49,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-08T00:21:49,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T00:21:49,637 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:49,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:49,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:49,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:49,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/15b91b253f50405bb209b402cbc7c48e is 50, key is test_row_0/A:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:49,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617369688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617369690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617369692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617369692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617369693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742417_1593 (size=16931) 2024-12-08T00:21:49,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T00:21:49,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T00:21:49,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:49,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:49,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:49,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:49,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:49,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617369794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617369794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617369796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617369796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T00:21:49,940 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:49,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T00:21:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:49,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:49,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617369999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617370000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617370000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617370001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T00:21:50,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/15b91b253f50405bb209b402cbc7c48e 2024-12-08T00:21:50,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a138c98f153749508d119ee19943633b is 50, key is test_row_0/B:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:50,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742418_1594 (size=12151) 2024-12-08T00:21:50,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a138c98f153749508d119ee19943633b 2024-12-08T00:21:50,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/f89d084884e9435aba23ad48a06f5fc6 is 50, key is test_row_0/C:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:50,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742419_1595 (size=12151) 2024-12-08T00:21:50,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/f89d084884e9435aba23ad48a06f5fc6 2024-12-08T00:21:50,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/15b91b253f50405bb209b402cbc7c48e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e 2024-12-08T00:21:50,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e, entries=250, sequenceid=238, filesize=16.5 K 2024-12-08T00:21:50,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a138c98f153749508d119ee19943633b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b 2024-12-08T00:21:50,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b, entries=150, sequenceid=238, filesize=11.9 K 2024-12-08T00:21:50,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/f89d084884e9435aba23ad48a06f5fc6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6 2024-12-08T00:21:50,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6, entries=150, sequenceid=238, filesize=11.9 K 2024-12-08T00:21:50,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for cc3ed2949e0e40ebaa106781844b31d7 in 458ms, sequenceid=238, compaction requested=true 2024-12-08T00:21:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:50,145 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:50,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:50,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:50,146 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:50,146 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:50,147 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:50,147 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:50,147 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:50,147 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:50,148 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,148 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,148 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/867c171c88eb4e7ab33aa15127ee3480, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=40.7 K 2024-12-08T00:21:50,148 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/0bb5e73814884ae7a728a281702286bd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.0 K 2024-12-08T00:21:50,148 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0bb5e73814884ae7a728a281702286bd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:50,148 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 867c171c88eb4e7ab33aa15127ee3480, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:50,148 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c2784e183349168258245561ddc22d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733617307386 2024-12-08T00:21:50,148 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbed7f0cb2af4f0f88e2a7fc05caa7ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733617307386 2024-12-08T00:21:50,149 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a138c98f153749508d119ee19943633b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:50,149 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15b91b253f50405bb209b402cbc7c48e, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:50,158 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:50,158 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:50,159 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/368ecbe6a18a48b8936114aacb43b8d1 is 50, key is test_row_0/B:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:50,159 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/1eda07df78284467bac1db8059449737 is 50, key is test_row_0/A:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:50,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742421_1597 (size=12697) 2024-12-08T00:21:50,183 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/368ecbe6a18a48b8936114aacb43b8d1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/368ecbe6a18a48b8936114aacb43b8d1 2024-12-08T00:21:50,189 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into 368ecbe6a18a48b8936114aacb43b8d1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
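The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once a region's memstore grows past a blocking threshold; the "Over memstore limit=512.0 K" figure is that threshold for this test region. A minimal sketch follows, assuming only the standard hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier keys; the tiny flush size shown is an assumption chosen to mirror the 512 K limit seen in the log (TestAcidGuarantees deliberately runs with a small flush size), not a value read from this run.

// Hedged sketch, not part of the captured log: shows how the blocking memstore
// threshold behind the "Over memstore limit" warnings is typically derived.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region memstore flush size (default 128 MB). 128 KB here is an
    // assumed test-style value that would yield the 512 K limit in the log.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);

    // Multiplier applied before writes are blocked with RegionTooBusyException (default 4).
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);

    // Writes to a region are rejected once its memstore exceeds this product,
    // which is the "Over memstore limit=..." value in the WARN entries above.
    long blockingLimit = flushSize * blockMultiplier;
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}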
2024-12-08T00:21:50,189 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:50,189 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617310145; duration=0sec 2024-12-08T00:21:50,189 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:50,189 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:50,189 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:50,190 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:50,190 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:50,190 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,190 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6bfd37ba71dc4e99942b3c872998c4b1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.0 K 2024-12-08T00:21:50,190 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bfd37ba71dc4e99942b3c872998c4b1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733617306337 2024-12-08T00:21:50,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f460221997942e981a11fdfa6ff542b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733617307386 2024-12-08T00:21:50,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f89d084884e9435aba23ad48a06f5fc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:50,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 
is added to blk_1073742420_1596 (size=12697) 2024-12-08T00:21:50,200 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#502 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:50,201 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c34d976733d94595883e752a35be6a84 is 50, key is test_row_0/C:col10/1733617309685/Put/seqid=0 2024-12-08T00:21:50,206 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/1eda07df78284467bac1db8059449737 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/1eda07df78284467bac1db8059449737 2024-12-08T00:21:50,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742422_1598 (size=12697) 2024-12-08T00:21:50,213 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 1eda07df78284467bac1db8059449737(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:50,213 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:50,213 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617310145; duration=0sec 2024-12-08T00:21:50,213 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:50,213 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:50,219 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c34d976733d94595883e752a35be6a84 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c34d976733d94595883e752a35be6a84 2024-12-08T00:21:50,225 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into c34d976733d94595883e752a35be6a84(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
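The flush that finally lands below (pid=145, "Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families") is driven by a master flush procedure, while the same memstore pressure surfaces to clients as the rejected Mutate calls above. A minimal client-side sketch, assuming only the public Admin/Table API: the explicit retry loop is illustrative, since the stock HBase client already retries retriable exceptions such as RegionTooBusyException on its own; table, row and column family names are taken from the log, and the written value is made up.

// Hedged sketch, not part of the test code: flush the table and back off on
// RegionTooBusyException, the two behaviours visible in the surrounding log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn)) {

      // Client-side counterpart of the master-driven flush procedure (pid=145) above.
      admin.flush(tn);

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Back off and retry while the region reports "Over memstore limit".
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(100L * (attempt + 1)); // simple linear backoff
        }
      }
    }
  }
}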
2024-12-08T00:21:50,225 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:50,225 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617310146; duration=0sec 2024-12-08T00:21:50,225 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:50,225 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T00:21:50,246 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T00:21:50,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,248 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a1febb0d373d4835b3651feeeebff1dc is 50, key is test_row_0/A:col10/1733617309688/Put/seqid=0 2024-12-08T00:21:50,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742423_1599 (size=12151) 2024-12-08T00:21:50,258 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a1febb0d373d4835b3651feeeebff1dc 2024-12-08T00:21:50,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d24a3ad468424338a70b6e04c4606ddf is 50, key is test_row_0/B:col10/1733617309688/Put/seqid=0 2024-12-08T00:21:50,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742424_1600 (size=12151) 2024-12-08T00:21:50,275 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d24a3ad468424338a70b6e04c4606ddf 2024-12-08T00:21:50,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/77e2bdf1d80a4b2f8f33723bb15121d4 is 50, key is test_row_0/C:col10/1733617309688/Put/seqid=0 2024-12-08T00:21:50,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742425_1601 (size=12151) 2024-12-08T00:21:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:50,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:50,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617370350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617370351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617370351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617370352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617370453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617370458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617370460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617370460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617370658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617370662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617370665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617370666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,688 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/77e2bdf1d80a4b2f8f33723bb15121d4 2024-12-08T00:21:50,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a1febb0d373d4835b3651feeeebff1dc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc 2024-12-08T00:21:50,695 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc, entries=150, sequenceid=247, filesize=11.9 K 2024-12-08T00:21:50,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/d24a3ad468424338a70b6e04c4606ddf as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf 2024-12-08T00:21:50,699 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf, entries=150, sequenceid=247, filesize=11.9 K 2024-12-08T00:21:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/77e2bdf1d80a4b2f8f33723bb15121d4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4 2024-12-08T00:21:50,703 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4, entries=150, sequenceid=247, filesize=11.9 K 2024-12-08T00:21:50,703 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for cc3ed2949e0e40ebaa106781844b31d7 in 455ms, sequenceid=247, compaction requested=false 2024-12-08T00:21:50,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:50,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-08T00:21:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-08T00:21:50,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-08T00:21:50,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0680 sec 2024-12-08T00:21:50,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:50,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:50,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:50,708 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.0720 sec 2024-12-08T00:21:50,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3e7ca6c33a2c4d52901669e3e1d57b10 is 50, key is test_row_0/A:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:50,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742426_1602 (size=14741) 2024-12-08T00:21:50,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3e7ca6c33a2c4d52901669e3e1d57b10 2024-12-08T00:21:50,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617370716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/bd55b8bb33fa4fa69cf8d8974b21ed9d is 50, key is test_row_0/B:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:50,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742427_1603 (size=12301) 2024-12-08T00:21:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T00:21:50,739 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-08T00:21:50,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:50,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-08T00:21:50,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:50,742 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:50,742 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:50,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:50,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617370820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:50,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T00:21:50,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:50,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:50,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:50,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617370963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617370966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:50,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617370969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:50,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617370970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617371026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:51,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T00:21:51,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:51,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:51,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:51,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:51,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:51,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/bd55b8bb33fa4fa69cf8d8974b21ed9d 2024-12-08T00:21:51,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/2554bd4b9061459da1c619deb0848746 is 50, key is test_row_0/C:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:51,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742428_1604 (size=12301) 2024-12-08T00:21:51,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/2554bd4b9061459da1c619deb0848746 2024-12-08T00:21:51,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/3e7ca6c33a2c4d52901669e3e1d57b10 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10 2024-12-08T00:21:51,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10, entries=200, sequenceid=278, filesize=14.4 K 2024-12-08T00:21:51,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/bd55b8bb33fa4fa69cf8d8974b21ed9d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d 2024-12-08T00:21:51,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d, entries=150, sequenceid=278, filesize=12.0 K 2024-12-08T00:21:51,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/2554bd4b9061459da1c619deb0848746 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746 2024-12-08T00:21:51,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746, entries=150, sequenceid=278, filesize=12.0 K 2024-12-08T00:21:51,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for cc3ed2949e0e40ebaa106781844b31d7 in 454ms, sequenceid=278, compaction requested=true 2024-12-08T00:21:51,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:51,161 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:51,162 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:51,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:51,162 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:51,163 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:51,163 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:51,163 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/1eda07df78284467bac1db8059449737, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=38.7 K 2024-12-08T00:21:51,163 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:51,163 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:51,163 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
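The selection entries above show ExploringCompactionPolicy picking up all three flushed store files per column family for a minor compaction ("3 store files, 0 compacting, 3 eligible, 16 blocking"). A minimal configuration sketch of the store-file thresholds involved; the class name and the values are illustrative assumptions, not the settings used by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // considered (the selections above fire once 3 files are eligible).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files compacted in a single pass.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store file count at which further flushes are delayed; this is the
        // "16 blocking" figure printed by SortedCompactionPolicy above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }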
2024-12-08T00:21:51,163 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/368ecbe6a18a48b8936114aacb43b8d1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.3 K 2024-12-08T00:21:51,163 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1eda07df78284467bac1db8059449737, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:51,163 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 368ecbe6a18a48b8936114aacb43b8d1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:51,164 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1febb0d373d4835b3651feeeebff1dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733617309688 2024-12-08T00:21:51,164 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d24a3ad468424338a70b6e04c4606ddf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733617309688 2024-12-08T00:21:51,164 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e7ca6c33a2c4d52901669e3e1d57b10, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:51,164 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bd55b8bb33fa4fa69cf8d8974b21ed9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:51,174 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#509 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:51,174 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:51,175 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e6bf070b90d14466bba0b4fd264220e9 is 50, key is test_row_0/A:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:51,175 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f8a6a5cb40f74ef694999192a8c59f9f is 50, key is test_row_0/B:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:51,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T00:21:51,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:51,199 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:51,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:51,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742429_1605 (size=12949) 2024-12-08T00:21:51,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/5280b4c6e10a4405a7356c3a9f2cce99 is 50, key is 
test_row_0/A:col10/1733617310709/Put/seqid=0 2024-12-08T00:21:51,209 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e6bf070b90d14466bba0b4fd264220e9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e6bf070b90d14466bba0b4fd264220e9 2024-12-08T00:21:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742430_1606 (size=12949) 2024-12-08T00:21:51,216 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into e6bf070b90d14466bba0b4fd264220e9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:51,216 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:51,216 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617311161; duration=0sec 2024-12-08T00:21:51,216 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:51,216 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:51,216 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:51,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742431_1607 (size=9857) 2024-12-08T00:21:51,220 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:51,220 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:51,220 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
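The RegionTooBusyException entries that dominate this section come from HRegion.checkResources(), which rejects updates once a region's memstore exceeds its blocking size, i.e. the configured flush size times the block multiplier (the 512.0 K limit here points to a deliberately small flush size in the test setup). A minimal sketch of the two settings, with illustrative values chosen to reproduce a 512 K blocking limit; the class name is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static Configuration memstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is requested. 128 K is illustrative;
        // production defaults are far larger.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Updates are rejected with RegionTooBusyException once the memstore
        // reaches flush.size * block.multiplier; 128 K * 4 = 512 K would match
        // the "Over memstore limit=512.0 K" messages in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }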
2024-12-08T00:21:51,220 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c34d976733d94595883e752a35be6a84, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.3 K 2024-12-08T00:21:51,221 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c34d976733d94595883e752a35be6a84, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733617308532 2024-12-08T00:21:51,221 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77e2bdf1d80a4b2f8f33723bb15121d4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733617309688 2024-12-08T00:21:51,222 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2554bd4b9061459da1c619deb0848746, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:51,226 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f8a6a5cb40f74ef694999192a8c59f9f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f8a6a5cb40f74ef694999192a8c59f9f 2024-12-08T00:21:51,232 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#512 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:51,233 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/9b30491cb81e4eef9f20300fc849dff4 is 50, key is test_row_0/C:col10/1733617310706/Put/seqid=0 2024-12-08T00:21:51,235 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into f8a6a5cb40f74ef694999192a8c59f9f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:51,235 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:51,235 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617311162; duration=0sec 2024-12-08T00:21:51,235 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:51,235 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:51,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742432_1608 (size=12949) 2024-12-08T00:21:51,263 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/9b30491cb81e4eef9f20300fc849dff4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9b30491cb81e4eef9f20300fc849dff4 2024-12-08T00:21:51,269 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 9b30491cb81e4eef9f20300fc849dff4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:51,269 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:51,269 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617311162; duration=0sec 2024-12-08T00:21:51,269 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:51,269 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:51,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:51,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:51,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617371430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617371468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617371470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617371477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617371478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617371536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,619 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/5280b4c6e10a4405a7356c3a9f2cce99 2024-12-08T00:21:51,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ca2466d52c0240a4a9d3c940c561a13d is 50, key is test_row_0/B:col10/1733617310709/Put/seqid=0 2024-12-08T00:21:51,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742433_1609 (size=9857) 2024-12-08T00:21:51,638 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ca2466d52c0240a4a9d3c940c561a13d 2024-12-08T00:21:51,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/18b753250d7a44b98385bf925d42d5cf is 50, key is 
test_row_0/C:col10/1733617310709/Put/seqid=0 2024-12-08T00:21:51,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742434_1610 (size=9857) 2024-12-08T00:21:51,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617371742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:52,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617372049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,062 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/18b753250d7a44b98385bf925d42d5cf 2024-12-08T00:21:52,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/5280b4c6e10a4405a7356c3a9f2cce99 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99 2024-12-08T00:21:52,069 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99, entries=100, sequenceid=283, filesize=9.6 K 2024-12-08T00:21:52,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ca2466d52c0240a4a9d3c940c561a13d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d 2024-12-08T00:21:52,072 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d, entries=100, sequenceid=283, filesize=9.6 K 2024-12-08T00:21:52,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/18b753250d7a44b98385bf925d42d5cf as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf 2024-12-08T00:21:52,076 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf, entries=100, sequenceid=283, filesize=9.6 K 2024-12-08T00:21:52,076 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=187.85 KB/192360 for cc3ed2949e0e40ebaa106781844b31d7 in 877ms, sequenceid=283, compaction requested=false 2024-12-08T00:21:52,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:52,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:52,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-08T00:21:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-08T00:21:52,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-08T00:21:52,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3380 sec 2024-12-08T00:21:52,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.3420 sec 2024-12-08T00:21:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:52,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:52,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:52,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617372476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617372478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/8a00e2e8eee146d0849d7f2902f3d8e7 is 50, key is test_row_0/A:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742435_1611 (size=14741) 2024-12-08T00:21:52,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/8a00e2e8eee146d0849d7f2902f3d8e7 2024-12-08T00:21:52,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617372485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617372487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/13488325e624444181742d8691843154 is 50, key is test_row_0/B:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742436_1612 (size=12301) 2024-12-08T00:21:52,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617372554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617372579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617372582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617372783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617372785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:52,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T00:21:52,846 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-08T00:21:52,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-08T00:21:52,849 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:52,849 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:52,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:52,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T00:21:52,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at 
sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/13488325e624444181742d8691843154 2024-12-08T00:21:52,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/393393e381814adb8a7a8c7f8f7b9089 is 50, key is test_row_0/C:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742437_1613 (size=12301) 2024-12-08T00:21:52,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/393393e381814adb8a7a8c7f8f7b9089 2024-12-08T00:21:52,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/8a00e2e8eee146d0849d7f2902f3d8e7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7 2024-12-08T00:21:52,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7, entries=200, sequenceid=318, filesize=14.4 K 2024-12-08T00:21:52,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/13488325e624444181742d8691843154 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154 2024-12-08T00:21:52,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T00:21:52,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/393393e381814adb8a7a8c7f8f7b9089 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089 2024-12-08T00:21:52,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T00:21:52,940 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for cc3ed2949e0e40ebaa106781844b31d7 in 464ms, sequenceid=318, compaction requested=true 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:52,940 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:52,940 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:52,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:52,941 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37547 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:52,941 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:52,941 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:52,941 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:52,941 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:52,941 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:52,941 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e6bf070b90d14466bba0b4fd264220e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.7 K 2024-12-08T00:21:52,941 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f8a6a5cb40f74ef694999192a8c59f9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=34.3 K 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f8a6a5cb40f74ef694999192a8c59f9f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6bf070b90d14466bba0b4fd264220e9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ca2466d52c0240a4a9d3c940c561a13d, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733617310709 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5280b4c6e10a4405a7356c3a9f2cce99, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733617310709 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a00e2e8eee146d0849d7f2902f3d8e7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:52,942 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 13488325e624444181742d8691843154, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:52,950 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#518 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:52,951 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ade30619a5b64c14a770fda7a2763835 is 50, key is test_row_0/B:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,954 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:52,954 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50 is 50, key is test_row_0/A:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T00:21:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742438_1614 (size=13051) 2024-12-08T00:21:52,961 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/ade30619a5b64c14a770fda7a2763835 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ade30619a5b64c14a770fda7a2763835 2024-12-08T00:21:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742439_1615 (size=13051) 2024-12-08T00:21:52,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50 2024-12-08T00:21:52,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into ade30619a5b64c14a770fda7a2763835(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
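The PressureAwareThroughputController lines report an average compaction throughput of 6.55 MB/second against a total limit of 50.00 MB/second. That limit is governed by the compaction throughput bounds; the sketch below shows the relevant knobs, assuming HBase 2.x property names, with illustrative values (in a real deployment these belong in hbase-site.xml on the region servers, not in client code).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSettings {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // With no compaction pressure the controller throttles at the lower bound
        // (consistent with the "total limit is 50.00 MB/second" messages above).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // As compaction pressure rises the limit ramps up toward the higher bound.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.get("hbase.hstore.compaction.throughput.lower.bound") + " bytes/sec");
      }
    }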
2024-12-08T00:21:52,979 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:52,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617312940; duration=0sec 2024-12-08T00:21:52,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:52,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:52,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:52,982 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:52,982 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:52,982 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:52,982 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9b30491cb81e4eef9f20300fc849dff4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=34.3 K 2024-12-08T00:21:52,984 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b30491cb81e4eef9f20300fc849dff4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617310341 2024-12-08T00:21:52,985 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 18b753250d7a44b98385bf925d42d5cf, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733617310709 2024-12-08T00:21:52,985 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 393393e381814adb8a7a8c7f8f7b9089, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:52,986 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into e5898f0dc3f44ba0ad5c5b0f8dfdbd50(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:52,986 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:52,986 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617312940; duration=0sec 2024-12-08T00:21:52,986 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:52,986 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:52,992 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:52,993 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/dc788bd633354034b83c75248e78bd34 is 50, key is test_row_0/C:col10/1733617311390/Put/seqid=0 2024-12-08T00:21:52,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742440_1616 (size=13051) 2024-12-08T00:21:53,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
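pid=149 above is a FlushRegionCallable dispatched by the master's FlushTableProcedure (pid=148). The same flush can be requested from a client; a minimal sketch follows, assuming an hbase-site.xml for this cluster on the classpath (the class name is illustrative).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master, which fans out one
          // FlushRegionProcedure per region (the pid=148 -> pid=149 chain in the log).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The call returns once the master reports the procedure done, which is what the repeated "Checking to see if procedure is done pid=148" polling and the "Operation: FLUSH ... procId: 148 completed" line above reflect.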
2024-12-08T00:21:53,002 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/f7773ac0bf204e709884cdd46bfc771c is 50, key is test_row_2/A:col10/1733617312477/Put/seqid=0 2024-12-08T00:21:53,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742441_1617 (size=7415) 2024-12-08T00:21:53,012 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/f7773ac0bf204e709884cdd46bfc771c 2024-12-08T00:21:53,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e558dfeea2584fc0a2ad39478f499fa7 is 50, key is test_row_2/B:col10/1733617312477/Put/seqid=0 2024-12-08T00:21:53,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742442_1618 (size=7415) 2024-12-08T00:21:53,032 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=324 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e558dfeea2584fc0a2ad39478f499fa7 2024-12-08T00:21:53,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/9179d79d08894e5c9fdcfe8cb82cb720 is 50, key is test_row_2/C:col10/1733617312477/Put/seqid=0 2024-12-08T00:21:53,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742443_1619 (size=7415) 2024-12-08T00:21:53,052 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/9179d79d08894e5c9fdcfe8cb82cb720 2024-12-08T00:21:53,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/f7773ac0bf204e709884cdd46bfc771c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c 2024-12-08T00:21:53,061 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c, entries=50, sequenceid=324, filesize=7.2 K 2024-12-08T00:21:53,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e558dfeea2584fc0a2ad39478f499fa7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7 2024-12-08T00:21:53,065 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7, entries=50, sequenceid=324, filesize=7.2 K 2024-12-08T00:21:53,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/9179d79d08894e5c9fdcfe8cb82cb720 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720 2024-12-08T00:21:53,071 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720, entries=50, sequenceid=324, filesize=7.2 K 2024-12-08T00:21:53,072 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for cc3ed2949e0e40ebaa106781844b31d7 in 70ms, sequenceid=324, compaction requested=false 2024-12-08T00:21:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-08T00:21:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-08T00:21:53,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-08T00:21:53,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 224 msec 2024-12-08T00:21:53,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 227 msec 2024-12-08T00:21:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:53,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:53,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:53,107 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a5cb7708a81a4e50a51d235d0a67891f is 50, key is test_row_0/A:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742444_1620 (size=17181) 2024-12-08T00:21:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T00:21:53,160 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-08T00:21:53,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-08T00:21:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:53,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:53,165 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:53,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:53,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617373167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617373167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:53,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617373273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617373274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,318 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:53,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:53,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
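The warnings above show writers being rejected with RegionTooBusyException because the region's memstore has reached its 512.0 K blocking limit while a flush is still in progress. The stock HBase client retries these internally and may eventually rethrow them wrapped in a retries-exhausted exception; the sketch below only illustrates the manual back-off-and-retry idea, with the row, family and qualifier taken from the log and an arbitrary value.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // The region blocks mutations until the memstore drains below its limit;
              // back off a little and retry (the client's own retry loop does the same).
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }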
2024-12-08T00:21:53,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,404 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/dc788bd633354034b83c75248e78bd34 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/dc788bd633354034b83c75248e78bd34 2024-12-08T00:21:53,408 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into dc788bd633354034b83c75248e78bd34(size=12.7 K), total size for store is 20.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:53,408 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:53,408 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617312940; duration=0sec 2024-12-08T00:21:53,408 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:53,408 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:53,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:53,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:53,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:53,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617373478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617373478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a5cb7708a81a4e50a51d235d0a67891f 2024-12-08T00:21:53,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f4c572b443a24d4192a6ed0ba3baf51a is 50, key is test_row_0/B:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742445_1621 (size=12301) 2024-12-08T00:21:53,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f4c572b443a24d4192a6ed0ba3baf51a 2024-12-08T00:21:53,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/a0dd03e6891c4e45b6adfb521c315777 is 50, key is test_row_0/C:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742446_1622 (size=12301) 2024-12-08T00:21:53,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617373562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
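The 512.0 K figure in these RegionTooBusyException messages is the memstore blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values in the sketch below are one hypothetical combination that yields 512 K; they are not taken from this test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: once a region's memstore reaches
        // flush.size * block.multiplier, new mutations get RegionTooBusyException.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }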
2024-12-08T00:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:53,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:53,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:53,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617373784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:53,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617373785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:53,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:53,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:53,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
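Annotation: while the flush procedure (pid=151) keeps being re-dispatched, the client-side writers simply see RegionTooBusyException and retry; the RpcRetryingCallerImpl entries further down ("tries=6, retries=16") show HBase's built-in retry loop doing exactly that. The sketch below is an application-level illustration of the same back-off idea around Table.put; it is not code from this test, the table, row, and family names are placeholders, and in practice the client's internal retries usually absorb this exception before it reaches application code (and may surface a wrapped retries-exhausted error instead).

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  // Retry a single put with exponential backoff when the region reports it
  // is over its memstore limit (illustrative only).
  static void putWithRetry(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (attempt >= 5) {
          throw busy; // give up after a handful of attempts
        }
        Thread.sleep(backoffMs);
        backoffMs *= 2; // back off so the pending flush/compaction can catch up
      }
    }
  }

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithRetry(table, put);
    }
  }
}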
2024-12-08T00:21:53,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:53,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/a0dd03e6891c4e45b6adfb521c315777 2024-12-08T00:21:53,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a5cb7708a81a4e50a51d235d0a67891f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f 2024-12-08T00:21:53,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f, entries=250, sequenceid=335, filesize=16.8 K 2024-12-08T00:21:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f4c572b443a24d4192a6ed0ba3baf51a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a 2024-12-08T00:21:53,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a, entries=150, sequenceid=335, filesize=12.0 K 2024-12-08T00:21:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/a0dd03e6891c4e45b6adfb521c315777 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777 2024-12-08T00:21:53,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777, entries=150, sequenceid=335, filesize=12.0 K 2024-12-08T00:21:53,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for cc3ed2949e0e40ebaa106781844b31d7 in 847ms, sequenceid=335, compaction requested=true 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:53,951 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:53,951 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:53,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37647 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:53,952 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,952 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
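Annotation: after the flush, each store (A, B, C) holds three HFiles, so CompactSplit queues minor compactions and ExploringCompactionPolicy reports that it "selected 3 files ... after considering 1 permutations with 1 in ratio". The snippet below is a deliberately simplified, stand-alone illustration of the size-ratio idea behind such selection; it is not HBase's implementation, and the file sizes and ratio value are rough assumptions based on the sizes logged for the B store.

import java.util.List;

public class RatioSelectionSketch {
  // Simplified ratio test: a candidate set is acceptable when no single file
  // is more than `ratio` times the combined size of the other files, so the
  // compaction does mostly useful merging rather than rewriting one big file.
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the B-store inputs from the log: ~12.7 K, ~7.2 K, ~12.0 K.
    List<Long> sizes = List.of(13004L, 7373L, 12301L);
    System.out.println("eligible for minor compaction: " + withinRatio(sizes, 1.2));
  }
}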
2024-12-08T00:21:53,952 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ade30619a5b64c14a770fda7a2763835, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=32.0 K 2024-12-08T00:21:53,952 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.8 K 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ade30619a5b64c14a770fda7a2763835, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:53,952 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5898f0dc3f44ba0ad5c5b0f8dfdbd50, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:53,953 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7773ac0bf204e709884cdd46bfc771c, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733617312477 2024-12-08T00:21:53,953 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e558dfeea2584fc0a2ad39478f499fa7, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733617312477 2024-12-08T00:21:53,953 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f4c572b443a24d4192a6ed0ba3baf51a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:53,953 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5cb7708a81a4e50a51d235d0a67891f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:53,960 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:53,961 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/fad5e1190d6142258a0e354cb2cb99b6 is 50, key is test_row_0/A:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,963 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:53,963 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/79914c6b9a204b4fa52a272a2cb6f881 is 50, key is test_row_0/B:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742447_1623 (size=13153) 2024-12-08T00:21:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742448_1624 (size=13153) 2024-12-08T00:21:53,976 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/79914c6b9a204b4fa52a272a2cb6f881 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/79914c6b9a204b4fa52a272a2cb6f881 2024-12-08T00:21:53,980 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into 79914c6b9a204b4fa52a272a2cb6f881(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
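Annotation: the B-store compaction just completed merged three files totalling ~32 K into a single 12.8 K file; the output is much smaller than the sum of its inputs because the inputs carry overlapping versions of the same small set of test rows. Here the compactions are queued automatically by the flusher, but a compaction can also be requested explicitly through the Admin API, as in the hedged sketch below (the table name is reused from this log purely for illustration).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");

      // Ask the region servers to queue a minor compaction for every region
      // of the table; the request is asynchronous, like the CompactSplit
      // queueing seen in the log lines above.
      admin.compact(table);

      // A major compaction would rewrite all files per store and also drop
      // deleted and expired cells:
      // admin.majorCompact(table);
    }
  }
}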
2024-12-08T00:21:53,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:53,980 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617313951; duration=0sec 2024-12-08T00:21:53,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:53,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:53,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:53,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:53,981 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:53,981 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:53,982 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/dc788bd633354034b83c75248e78bd34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=32.0 K 2024-12-08T00:21:53,982 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dc788bd633354034b83c75248e78bd34, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617311390 2024-12-08T00:21:53,982 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9179d79d08894e5c9fdcfe8cb82cb720, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733617312477 2024-12-08T00:21:53,982 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a0dd03e6891c4e45b6adfb521c315777, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:53,989 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cc3ed2949e0e40ebaa106781844b31d7#C#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:53,990 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/eacbd958f01443c9b1fa6ce9f2bef0a8 is 50, key is test_row_0/C:col10/1733617313102/Put/seqid=0 2024-12-08T00:21:53,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742449_1625 (size=13153) 2024-12-08T00:21:53,998 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/eacbd958f01443c9b1fa6ce9f2bef0a8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/eacbd958f01443c9b1fa6ce9f2bef0a8 2024-12-08T00:21:54,003 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into eacbd958f01443c9b1fa6ce9f2bef0a8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:54,003 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:54,003 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617313951; duration=0sec 2024-12-08T00:21:54,003 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:54,003 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:54,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T00:21:54,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
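Annotation: after several "NOT flushing ... as already flushing" rejections, this dispatch of the flush procedure (pid=151) finally reaches a region that is ready, and the next entries show the actual flush of all three column families. The "Checking to see if procedure is done pid=150" lines appear to be a client polling the master for completion of a table-level procedure; from application code such a flush is typically requested through the Admin API, as in the illustrative sketch below (again reusing the table name from this log, not code from the test).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a memstore flush for every region of the table. The master
      // drives this as a procedure, and a region server that is already
      // mid-flush may answer "already flushing" and be retried, which is
      // exactly the pid=151 churn visible in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}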
2024-12-08T00:21:54,083 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T00:21:54,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:54,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:54,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:54,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:54,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:54,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:54,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/c52ea387024243fb918b855001f74737 is 50, key is test_row_0/A:col10/1733617313158/Put/seqid=0 2024-12-08T00:21:54,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742450_1626 (size=12301) 2024-12-08T00:21:54,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:54,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:54,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:54,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617374305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617374306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,370 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/fad5e1190d6142258a0e354cb2cb99b6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/fad5e1190d6142258a0e354cb2cb99b6 2024-12-08T00:21:54,374 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into fad5e1190d6142258a0e354cb2cb99b6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:54,374 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:54,374 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617313951; duration=0sec 2024-12-08T00:21:54,374 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:54,374 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:54,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617374410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617374411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,495 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/c52ea387024243fb918b855001f74737 2024-12-08T00:21:54,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617374494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,497 DEBUG [Thread-2399 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:21:54,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617374497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,499 DEBUG [Thread-2397 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:21:54,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/655fdb799db24e7db7a4445d153e4a95 is 50, key is test_row_0/B:col10/1733617313158/Put/seqid=0 2024-12-08T00:21:54,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742451_1627 (size=12301) 2024-12-08T00:21:54,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617374615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617374616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,910 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/655fdb799db24e7db7a4445d153e4a95 2024-12-08T00:21:54,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c39ce207801b4d90a70a6912dec3cc82 is 50, key is test_row_0/C:col10/1733617313158/Put/seqid=0 2024-12-08T00:21:54,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617374921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:54,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617374921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742452_1628 (size=12301) 2024-12-08T00:21:55,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:55,325 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c39ce207801b4d90a70a6912dec3cc82 2024-12-08T00:21:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/c52ea387024243fb918b855001f74737 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737 2024-12-08T00:21:55,332 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737, entries=150, sequenceid=364, filesize=12.0 K 2024-12-08T00:21:55,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/655fdb799db24e7db7a4445d153e4a95 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95 2024-12-08T00:21:55,336 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95, entries=150, sequenceid=364, filesize=12.0 K 2024-12-08T00:21:55,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/c39ce207801b4d90a70a6912dec3cc82 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82 2024-12-08T00:21:55,343 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82, entries=150, sequenceid=364, filesize=12.0 K 2024-12-08T00:21:55,343 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cc3ed2949e0e40ebaa106781844b31d7 in 1260ms, sequenceid=364, compaction requested=false 2024-12-08T00:21:55,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:55,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:55,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-08T00:21:55,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-08T00:21:55,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-08T00:21:55,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1790 sec 2024-12-08T00:21:55,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 2.1850 sec 2024-12-08T00:21:55,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:55,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:21:55,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:55,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:55,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:55,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:55,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:55,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:55,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/106b70d4834c413794fe25a7b533f914 is 50, key is test_row_0/A:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:55,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742453_1629 (size=17181) 2024-12-08T00:21:55,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617375483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617375483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617375571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,576 DEBUG [Thread-2401 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:21:55,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617375588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617375589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617375792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617375794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:55,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/106b70d4834c413794fe25a7b533f914 2024-12-08T00:21:55,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/35ec2a4e3c404f169450a9132d07fcb1 is 50, key is test_row_0/B:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:55,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742454_1630 (size=12301) 2024-12-08T00:21:55,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/35ec2a4e3c404f169450a9132d07fcb1 2024-12-08T00:21:55,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/5fd6951b359447cf8dcd3e6dfef9cd4f is 50, key is test_row_0/C:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:55,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742455_1631 (size=12301) 2024-12-08T00:21:56,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617376099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617376099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/5fd6951b359447cf8dcd3e6dfef9cd4f 2024-12-08T00:21:56,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/106b70d4834c413794fe25a7b533f914 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914 2024-12-08T00:21:56,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914, entries=250, sequenceid=377, filesize=16.8 K 2024-12-08T00:21:56,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/35ec2a4e3c404f169450a9132d07fcb1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1 2024-12-08T00:21:56,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1, entries=150, sequenceid=377, filesize=12.0 K 2024-12-08T00:21:56,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/5fd6951b359447cf8dcd3e6dfef9cd4f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f 2024-12-08T00:21:56,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f, entries=150, sequenceid=377, filesize=12.0 K 2024-12-08T00:21:56,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cc3ed2949e0e40ebaa106781844b31d7 in 843ms, sequenceid=377, compaction requested=true 2024-12-08T00:21:56,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:56,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:56,273 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:56,273 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:56,274 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
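Context for the repeated "Over memstore limit=512.0 K" rejections above: HRegion.checkResources throws RegionTooBusyException once a region's memstore grows past its flush size times a blocking multiplier. A minimal, illustrative Java sketch (class name here is an assumption; the defaults shown are the stock ones, not values verified from this run) that reads the two hbase-site.xml keys involved:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush (bytes); stock default is 128 MB.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Multiplier above which further writes are rejected with RegionTooBusyException.
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        // The 512 K limit in this log is far below the defaults, so the test presumably
        // lowers the flush size to force frequent flushes and blocking.
        System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
    }
}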
2024-12-08T00:21:56,274 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:56,274 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/79914c6b9a204b4fa52a272a2cb6f881, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.9 K 2024-12-08T00:21:56,274 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/fad5e1190d6142258a0e354cb2cb99b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=41.6 K 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 79914c6b9a204b4fa52a272a2cb6f881, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting fad5e1190d6142258a0e354cb2cb99b6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c52ea387024243fb918b855001f74737, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733617313158 2024-12-08T00:21:56,274 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 655fdb799db24e7db7a4445d153e4a95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733617313158 2024-12-08T00:21:56,275 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 106b70d4834c413794fe25a7b533f914, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314294 2024-12-08T00:21:56,275 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 35ec2a4e3c404f169450a9132d07fcb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314304 
2024-12-08T00:21:56,280 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#536 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:56,281 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e is 50, key is test_row_0/A:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:56,283 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:56,284 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f16d65167eca4ff1b526a624688601ac is 50, key is test_row_0/B:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:56,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742457_1633 (size=13255) 2024-12-08T00:21:56,292 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/f16d65167eca4ff1b526a624688601ac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f16d65167eca4ff1b526a624688601ac 2024-12-08T00:21:56,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742456_1632 (size=13255) 2024-12-08T00:21:56,297 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into f16d65167eca4ff1b526a624688601ac(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
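The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are driven by standard per-store file-count thresholds. A small illustrative sketch (generic class name; the fallback values are the stock defaults, which happen to match the counts in this log, not settings read from the test itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is scheduled.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in one minor compaction.
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which incoming writes are blocked ("16 blocking" in the log).
        int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(minFiles + " <= files per compaction <= " + maxFiles
            + ", writes block at " + blockingFiles + " store files");
    }
}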
2024-12-08T00:21:56,297 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:56,297 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=13, startTime=1733617316273; duration=0sec 2024-12-08T00:21:56,297 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:56,297 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:56,297 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:21:56,298 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:21:56,298 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:56,298 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:56,298 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/eacbd958f01443c9b1fa6ce9f2bef0a8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=36.9 K 2024-12-08T00:21:56,298 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting eacbd958f01443c9b1fa6ce9f2bef0a8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733617313096 2024-12-08T00:21:56,299 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c39ce207801b4d90a70a6912dec3cc82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733617313158 2024-12-08T00:21:56,299 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fd6951b359447cf8dcd3e6dfef9cd4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314304 2024-12-08T00:21:56,305 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cc3ed2949e0e40ebaa106781844b31d7#C#compaction#538 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:56,305 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/f9455a6dc0424114928db382b6c3eb74 is 50, key is test_row_0/C:col10/1733617314304/Put/seqid=0 2024-12-08T00:21:56,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742458_1634 (size=13255) 2024-12-08T00:21:56,312 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/f9455a6dc0424114928db382b6c3eb74 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f9455a6dc0424114928db382b6c3eb74 2024-12-08T00:21:56,315 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into f9455a6dc0424114928db382b6c3eb74(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:56,315 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:56,315 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=13, startTime=1733617316273; duration=0sec 2024-12-08T00:21:56,315 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:56,315 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:56,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:56,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:56,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:56,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a88d92f6f7004fbb8cfc203474618d28 is 50, key is test_row_0/A:col10/1733617316613/Put/seqid=0 2024-12-08T00:21:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742459_1635 (size=14741) 2024-12-08T00:21:56,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617376630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617376632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,699 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e 2024-12-08T00:21:56,703 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into bb0ce35c4eb04ad89f2b75a59fbe3b8e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:56,703 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:56,703 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=13, startTime=1733617316272; duration=0sec 2024-12-08T00:21:56,703 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:56,703 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:56,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617376733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617376736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617376936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:56,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617376941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a88d92f6f7004fbb8cfc203474618d28 2024-12-08T00:21:57,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/201abbd9ccac4b468fc50cd226996248 is 50, key is test_row_0/B:col10/1733617316613/Put/seqid=0 2024-12-08T00:21:57,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742460_1636 (size=12301) 2024-12-08T00:21:57,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:57,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617377240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617377244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T00:21:57,267 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-08T00:21:57,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:57,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-08T00:21:57,269 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:57,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:57,270 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:57,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:57,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:57,421 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T00:21:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
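The Mutate calls rejected above are re-issued by the writer threads while the flush drains the memstore. A rough sketch of that pattern (a hypothetical helper, not the test's own code; note the stock HBase client also retries RegionTooBusyException internally, governed by hbase.client.retries.number and hbase.client.pause, so the exception may never reach application code):

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class PutWithBackoff {
    // Hypothetical helper: re-issue a Put with linear backoff if the region reports
    // it is over its memstore limit and the exception propagates to the caller.
    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e; // give up and let the caller decide
                }
                Thread.sleep(100L * attempt); // give MemStoreFlusher time to drain the region
            }
        }
    }
}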
2024-12-08T00:21:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:57,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
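For reference, the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request that spawned procedure 152 is an ordinary admin-side table flush. A minimal sketch (connection setup is generic; only the table name is taken from the log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a FlushTableProcedure on the master and waits for completion; the
            // per-region step is retried by the master when the region is already flushing,
            // which is what the repeated pid=153 attempts above show.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}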
2024-12-08T00:21:57,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/201abbd9ccac4b468fc50cd226996248 2024-12-08T00:21:57,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/4719e789c6bd437088d4473c8ff0987e is 50, key is test_row_0/C:col10/1733617316613/Put/seqid=0 2024-12-08T00:21:57,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742461_1637 (size=12301) 2024-12-08T00:21:57,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:57,574 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T00:21:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,727 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T00:21:57,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:57,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:57,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617377745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617377750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/4719e789c6bd437088d4473c8ff0987e 2024-12-08T00:21:57,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/a88d92f6f7004fbb8cfc203474618d28 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28 2024-12-08T00:21:57,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28, entries=200, sequenceid=405, filesize=14.4 K 2024-12-08T00:21:57,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/201abbd9ccac4b468fc50cd226996248 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248 2024-12-08T00:21:57,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248, entries=150, sequenceid=405, filesize=12.0 K 2024-12-08T00:21:57,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/4719e789c6bd437088d4473c8ff0987e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e 2024-12-08T00:21:57,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e, entries=150, sequenceid=405, filesize=12.0 K 2024-12-08T00:21:57,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cc3ed2949e0e40ebaa106781844b31d7 in 1252ms, sequenceid=405, compaction requested=false 2024-12-08T00:21:57,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:57,880 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:57,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T00:21:57,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:57,881 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:57,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:57,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/db8924a43b164e789c45285ef34fb8e2 is 50, key is test_row_0/A:col10/1733617316631/Put/seqid=0 2024-12-08T00:21:57,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to 
blk_1073742462_1638 (size=12301) 2024-12-08T00:21:58,289 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/db8924a43b164e789c45285ef34fb8e2 2024-12-08T00:21:58,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a46f5dd4e99643a7b7f594701232a538 is 50, key is test_row_0/B:col10/1733617316631/Put/seqid=0 2024-12-08T00:21:58,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742463_1639 (size=12301) 2024-12-08T00:21:58,299 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a46f5dd4e99643a7b7f594701232a538 2024-12-08T00:21:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/b9c0ec31999843d287b64e08be33e6f8 is 50, key is test_row_0/C:col10/1733617316631/Put/seqid=0 2024-12-08T00:21:58,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742464_1640 (size=12301) 2024-12-08T00:21:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:58,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:58,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:58,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617378622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617378622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,713 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/b9c0ec31999843d287b64e08be33e6f8 2024-12-08T00:21:58,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/db8924a43b164e789c45285ef34fb8e2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2 2024-12-08T00:21:58,721 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2, entries=150, sequenceid=416, filesize=12.0 K 2024-12-08T00:21:58,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a46f5dd4e99643a7b7f594701232a538 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538 2024-12-08T00:21:58,724 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538, entries=150, sequenceid=416, filesize=12.0 K 2024-12-08T00:21:58,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/b9c0ec31999843d287b64e08be33e6f8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8 2024-12-08T00:21:58,728 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8, entries=150, sequenceid=416, filesize=12.0 K 2024-12-08T00:21:58,732 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for cc3ed2949e0e40ebaa106781844b31d7 in 852ms, sequenceid=416, compaction requested=true 2024-12-08T00:21:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-08T00:21:58,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-08T00:21:58,734 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-08T00:21:58,734 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4630 sec 2024-12-08T00:21:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:58,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-08T00:21:58,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 1.4670 sec 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:58,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:58,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/385ca4822fab44f2a0675ea08fdf1570 is 50, key is test_row_0/A:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:58,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742465_1641 (size=17181) 2024-12-08T00:21:58,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/385ca4822fab44f2a0675ea08fdf1570 2024-12-08T00:21:58,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a5c4eb0b4a9a46bdad260f1f8cab23fa is 50, key is test_row_0/B:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:58,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617378750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617378751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617378753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617378754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742466_1642 (size=12301) 2024-12-08T00:21:58,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617378856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:58,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617378857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617379063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617379063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a5c4eb0b4a9a46bdad260f1f8cab23fa 2024-12-08T00:21:59,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/52dbe59261a3438b9b8d7c5fb37ec2dc is 50, key is test_row_0/C:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:59,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742467_1643 (size=12301) 2024-12-08T00:21:59,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/52dbe59261a3438b9b8d7c5fb37ec2dc 2024-12-08T00:21:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/385ca4822fab44f2a0675ea08fdf1570 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570 2024-12-08T00:21:59,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570, entries=250, sequenceid=445, filesize=16.8 K 2024-12-08T00:21:59,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/a5c4eb0b4a9a46bdad260f1f8cab23fa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa 2024-12-08T00:21:59,186 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa, entries=150, sequenceid=445, filesize=12.0 K 2024-12-08T00:21:59,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/52dbe59261a3438b9b8d7c5fb37ec2dc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc 2024-12-08T00:21:59,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc, entries=150, sequenceid=445, filesize=12.0 K 2024-12-08T00:21:59,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for cc3ed2949e0e40ebaa106781844b31d7 in 455ms, sequenceid=445, compaction requested=true 2024-12-08T00:21:59,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:59,191 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:59,191 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:21:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57478 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:21:59,192 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,192 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,192 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=56.1 K 2024-12-08T00:21:59,192 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f16d65167eca4ff1b526a624688601ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=49.0 K 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f16d65167eca4ff1b526a624688601ac, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314304 2024-12-08T00:21:59,192 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb0ce35c4eb04ad89f2b75a59fbe3b8e, keycount=150, bloomtype=ROW, 
size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314304 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting a88d92f6f7004fbb8cfc203474618d28, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617315471 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 201abbd9ccac4b468fc50cd226996248, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617315471 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a46f5dd4e99643a7b7f594701232a538, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733617316625 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting db8924a43b164e789c45285ef34fb8e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733617316625 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a5c4eb0b4a9a46bdad260f1f8cab23fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318619 2024-12-08T00:21:59,193 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 385ca4822fab44f2a0675ea08fdf1570, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318590 2024-12-08T00:21:59,202 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#548 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:59,202 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c77e651b081147f1848b997b4af5f857 is 50, key is test_row_0/B:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:59,203 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#549 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:59,203 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/0b411734aa394a94bbbc7c488fef9ec2 is 50, key is test_row_0/A:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:59,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742468_1644 (size=13391) 2024-12-08T00:21:59,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742469_1645 (size=13391) 2024-12-08T00:21:59,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T00:21:59,373 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-08T00:21:59,374 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:21:59,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-12-08T00:21:59,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T00:21:59,376 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:21:59,376 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:21:59,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:21:59,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:21:59,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:21:59,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:21:59,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:59,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:21:59,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:59,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:21:59,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:21:59,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7952e6afbbe94239a5795418e1b1efa9 is 50, key is test_row_0/A:col10/1733617318749/Put/seqid=0 2024-12-08T00:21:59,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742470_1646 (size=14741) 2024-12-08T00:21:59,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7952e6afbbe94239a5795418e1b1efa9 2024-12-08T00:21:59,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/90489a5b2c05483ab3705f1c4659a820 is 50, key is test_row_0/B:col10/1733617318749/Put/seqid=0 2024-12-08T00:21:59,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742471_1647 (size=12301) 2024-12-08T00:21:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617379458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617379458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T00:21:59,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-08T00:21:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:59,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:59,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617379569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617379570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54040 deadline: 1733617379591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,596 DEBUG [Thread-2401 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:21:59,619 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/c77e651b081147f1848b997b4af5f857 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c77e651b081147f1848b997b4af5f857 2024-12-08T00:21:59,622 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into c77e651b081147f1848b997b4af5f857(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:21:59,623 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:59,623 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=12, startTime=1733617319191; duration=0sec 2024-12-08T00:21:59,623 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:21:59,623 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:21:59,623 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:21:59,624 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:21:59,624 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:21:59,625 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:21:59,625 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f9455a6dc0424114928db382b6c3eb74, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=49.0 K 2024-12-08T00:21:59,625 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f9455a6dc0424114928db382b6c3eb74, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733617314304 2024-12-08T00:21:59,625 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/0b411734aa394a94bbbc7c488fef9ec2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/0b411734aa394a94bbbc7c488fef9ec2 2024-12-08T00:21:59,626 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4719e789c6bd437088d4473c8ff0987e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733617315471 2024-12-08T00:21:59,626 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b9c0ec31999843d287b64e08be33e6f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733617316625 2024-12-08T00:21:59,627 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 52dbe59261a3438b9b8d7c5fb37ec2dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318619 2024-12-08T00:21:59,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 0b411734aa394a94bbbc7c488fef9ec2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:59,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:59,630 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=12, startTime=1733617319191; duration=0sec 2024-12-08T00:21:59,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:59,630 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:21:59,638 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#552 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:21:59,639 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/0f6d1bd9a7ea48be802442645e62e08f is 50, key is test_row_0/C:col10/1733617318734/Put/seqid=0 2024-12-08T00:21:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742472_1648 (size=13391) 2024-12-08T00:21:59,650 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/0f6d1bd9a7ea48be802442645e62e08f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/0f6d1bd9a7ea48be802442645e62e08f 2024-12-08T00:21:59,654 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 0f6d1bd9a7ea48be802442645e62e08f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:21:59,654 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:21:59,654 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=12, startTime=1733617319191; duration=0sec 2024-12-08T00:21:59,655 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:21:59,655 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:21:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T00:21:59,680 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-08T00:21:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:21:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617379774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:21:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617379776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/90489a5b2c05483ab3705f1c4659a820 2024-12-08T00:21:59,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2ed537c87c24075adc5f0ace9a28e8b is 50, key is test_row_0/C:col10/1733617318749/Put/seqid=0 2024-12-08T00:21:59,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742473_1649 (size=12301) 2024-12-08T00:21:59,833 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-08T00:21:59,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:59,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T00:21:59,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:21:59,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-08T00:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:21:59,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:21:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617380078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617380083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,138 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-08T00:22:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:22:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
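The repeated "Unable to complete flush ... as already flushing" failures are the server-side FlushRegionCallable declining to start while a flush is already in progress; the master records the failed subprocedure and redispatches it until the in-flight flush finishes (pid=155 eventually succeeds below). The flush itself is requested from the client through the Admin API, roughly as in this sketch; it is a minimal example assuming a reachable cluster, with the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with one
      // FlushRegionProcedure subprocedure per region, as seen in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}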
2024-12-08T00:22:00,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2ed537c87c24075adc5f0ace9a28e8b 2024-12-08T00:22:00,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/7952e6afbbe94239a5795418e1b1efa9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9 2024-12-08T00:22:00,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9, entries=200, sequenceid=456, filesize=14.4 K 2024-12-08T00:22:00,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/90489a5b2c05483ab3705f1c4659a820 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820 2024-12-08T00:22:00,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820, entries=150, sequenceid=456, filesize=12.0 K 2024-12-08T00:22:00,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/d2ed537c87c24075adc5f0ace9a28e8b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b 2024-12-08T00:22:00,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b, entries=150, sequenceid=456, filesize=12.0 K 2024-12-08T00:22:00,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc3ed2949e0e40ebaa106781844b31d7 in 850ms, sequenceid=456, compaction requested=false 2024-12-08T00:22:00,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:00,291 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=155 2024-12-08T00:22:00,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,291 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:22:00,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4247154164b64c3eb59a64abdd3754b2 is 50, key is test_row_0/A:col10/1733617319456/Put/seqid=0 2024-12-08T00:22:00,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742474_1650 (size=12301) 2024-12-08T00:22:00,299 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4247154164b64c3eb59a64abdd3754b2 2024-12-08T00:22:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/53714fdb6b2344cfa638dc6bd66150e9 is 50, key is test_row_0/B:col10/1733617319456/Put/seqid=0 2024-12-08T00:22:00,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742475_1651 (size=12301) 2024-12-08T00:22:00,319 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/53714fdb6b2344cfa638dc6bd66150e9 2024-12-08T00:22:00,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/115441d1f3f1499eaeb13e4c72ded4f2 is 50, key is test_row_0/C:col10/1733617319456/Put/seqid=0 2024-12-08T00:22:00,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742476_1652 (size=12301) 2024-12-08T00:22:00,344 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/115441d1f3f1499eaeb13e4c72ded4f2 2024-12-08T00:22:00,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4247154164b64c3eb59a64abdd3754b2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2 2024-12-08T00:22:00,353 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2, entries=150, sequenceid=484, filesize=12.0 K 2024-12-08T00:22:00,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/53714fdb6b2344cfa638dc6bd66150e9 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9 2024-12-08T00:22:00,358 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9, entries=150, sequenceid=484, filesize=12.0 K 2024-12-08T00:22:00,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/115441d1f3f1499eaeb13e4c72ded4f2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2 2024-12-08T00:22:00,364 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2, entries=150, sequenceid=484, filesize=12.0 K 2024-12-08T00:22:00,364 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for cc3ed2949e0e40ebaa106781844b31d7 in 73ms, sequenceid=484, compaction requested=true 2024-12-08T00:22:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-12-08T00:22:00,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-12-08T00:22:00,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-08T00:22:00,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 989 msec 2024-12-08T00:22:00,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 993 msec 2024-12-08T00:22:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T00:22:00,479 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-08T00:22:00,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-12-08T00:22:00,482 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:00,482 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:00,482 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:00,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:00,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:00,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:22:00,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:00,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e3c4bddb45614bf79429c9f706a261c1 is 50, key is test_row_0/A:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:00,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742477_1653 (size=17181) 2024-12-08T00:22:00,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e3c4bddb45614bf79429c9f706a261c1 2024-12-08T00:22:00,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/048c5f75350446aa81f53554cc089cfe is 50, key is test_row_0/B:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:00,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742478_1654 (size=12301) 2024-12-08T00:22:00,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:00,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:00,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
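The completed flushes above write each column family's snapshot to a temporary file under the region's .tmp/<family>/ directory and then commit it by moving it into the family directory (A/, B/, C/). A small sketch that lists the committed store files through the Hadoop FileSystem API; the HDFS URL and encoded region name are copied from this particular run and are not stable identifiers:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRegionStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region directory exactly as logged for this test run.
    Path regionDir = new Path("hdfs://localhost:46183/user/jenkins/test-data/"
        + "93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/"
        + "cc3ed2949e0e40ebaa106781844b31d7");
    FileSystem fs = regionDir.getFileSystem(conf);
    for (String family : new String[] {"A", "B", "C"}) {
      // Committed store files sit directly under the family directory;
      // in-flight flush output lives under <region>/.tmp/<family>/.
      for (FileStatus f : fs.listStatus(new Path(regionDir, family))) {
        System.out.println(family + "/" + f.getPath().getName()
            + " (" + f.getLen() + " bytes)");
      }
    }
  }
}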
2024-12-08T00:22:00,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617380670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617380669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54004 deadline: 1733617380764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,768 DEBUG [Thread-2393 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:00,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54024 deadline: 1733617380774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,778 DEBUG [Thread-2395 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., hostname=017dd09fb407,36703,1733617179335, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:00,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617380776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617380777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:00,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:00,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:00,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:00,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,938 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:00,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:00,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:00,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:00,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617380979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:00,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:00,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617380981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/048c5f75350446aa81f53554cc089cfe 2024-12-08T00:22:01,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/8ce305649f824644bb2e6f1e10ce4566 is 50, key is test_row_0/C:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:01,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742479_1655 (size=12301) 2024-12-08T00:22:01,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:01,091 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:22:01,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,243 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:01,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54018 deadline: 1733617381286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:01,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54014 deadline: 1733617381287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:01,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:22:01,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:01,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:01,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:01,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
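
The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above are HRegion.checkResources refusing new mutations while the region's memstore sits above its blocking size (derived from the configured flush size and hbase.hregion.memstore.block.multiplier); writes are rejected only until the in-flight flush, visible in the MemStoreFlusher entries that follow, drains the memstore. The sketch below is a hypothetical stand-alone writer, not part of this test: the table, row, family and qualifier names are taken from the log lines above, while the value and the retry parameters are invented for illustration. In practice the HBase client also retries this exception internally; the loop only shows why these warnings are transient rather than fatal.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical client-side writer with exponential backoff when the region
    // reports that its memstore is above the blocking limit.
    public final class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100L;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);          // server may reject with RegionTooBusyException
              break;                   // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs); // let the flush/compaction drain the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }
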
2024-12-08T00:22:01,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/8ce305649f824644bb2e6f1e10ce4566 2024-12-08T00:22:01,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/e3c4bddb45614bf79429c9f706a261c1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1 2024-12-08T00:22:01,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1, entries=250, sequenceid=495, filesize=16.8 K 2024-12-08T00:22:01,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/048c5f75350446aa81f53554cc089cfe as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe 2024-12-08T00:22:01,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe, entries=150, sequenceid=495, filesize=12.0 K 2024-12-08T00:22:01,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/8ce305649f824644bb2e6f1e10ce4566 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566 2024-12-08T00:22:01,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566, entries=150, sequenceid=495, filesize=12.0 K 2024-12-08T00:22:01,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc3ed2949e0e40ebaa106781844b31d7 in 853ms, sequenceid=495, compaction requested=true 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:01,456 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc3ed2949e0e40ebaa106781844b31d7:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:01,456 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:01,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:01,458 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:01,458 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/B is initiating minor compaction (all files) 2024-12-08T00:22:01,458 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/B in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
2024-12-08T00:22:01,458 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57614 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:01,458 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c77e651b081147f1848b997b4af5f857, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=49.1 K 2024-12-08T00:22:01,458 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/A is initiating minor compaction (all files) 2024-12-08T00:22:01,458 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/A in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,458 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/0b411734aa394a94bbbc7c488fef9ec2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=56.3 K 2024-12-08T00:22:01,459 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c77e651b081147f1848b997b4af5f857, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318619 2024-12-08T00:22:01,459 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b411734aa394a94bbbc7c488fef9ec2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318619 2024-12-08T00:22:01,459 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 90489a5b2c05483ab3705f1c4659a820, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, 
earliestPutTs=1733617318749 2024-12-08T00:22:01,459 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7952e6afbbe94239a5795418e1b1efa9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1733617318749 2024-12-08T00:22:01,459 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 53714fdb6b2344cfa638dc6bd66150e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733617319449 2024-12-08T00:22:01,460 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4247154164b64c3eb59a64abdd3754b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733617319449 2024-12-08T00:22:01,460 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 048c5f75350446aa81f53554cc089cfe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733617320590 2024-12-08T00:22:01,460 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3c4bddb45614bf79429c9f706a261c1, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733617320590 2024-12-08T00:22:01,478 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#A#compaction#560 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:01,478 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#B#compaction#561 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:01,479 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/1aa7efc745464d779515f1059ddab0bd is 50, key is test_row_0/B:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:01,479 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/70ed1e057d6a47109c8c4a380b27256d is 50, key is test_row_0/A:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:01,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742481_1657 (size=13527) 2024-12-08T00:22:01,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742480_1656 (size=13527) 2024-12-08T00:22:01,488 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/1aa7efc745464d779515f1059ddab0bd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/1aa7efc745464d779515f1059ddab0bd 2024-12-08T00:22:01,491 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/B of cc3ed2949e0e40ebaa106781844b31d7 into 1aa7efc745464d779515f1059ddab0bd(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:01,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:01,491 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/B, priority=12, startTime=1733617321456; duration=0sec 2024-12-08T00:22:01,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:01,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:B 2024-12-08T00:22:01,491 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:01,492 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:01,492 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): cc3ed2949e0e40ebaa106781844b31d7/C is initiating minor compaction (all files) 2024-12-08T00:22:01,492 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc3ed2949e0e40ebaa106781844b31d7/C in TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:01,492 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/0f6d1bd9a7ea48be802442645e62e08f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp, totalSize=49.1 K 2024-12-08T00:22:01,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f6d1bd9a7ea48be802442645e62e08f, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733617318619 2024-12-08T00:22:01,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d2ed537c87c24075adc5f0ace9a28e8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1733617318749 2024-12-08T00:22:01,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 115441d1f3f1499eaeb13e4c72ded4f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=484, earliestPutTs=1733617319449 2024-12-08T00:22:01,493 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ce305649f824644bb2e6f1e10ce4566, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=495, earliestPutTs=1733617320590 2024-12-08T00:22:01,501 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc3ed2949e0e40ebaa106781844b31d7#C#compaction#562 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:01,501 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/188ed94f58b649e0a107de7aee3e7e32 is 50, key is test_row_0/C:col10/1733617320590/Put/seqid=0 2024-12-08T00:22:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742482_1658 (size=13527) 2024-12-08T00:22:01,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:01,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-08T00:22:01,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
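
The "Exploring compaction algorithm has selected 4 files of size 50294 ... with 3 in ratio" lines above come from ExploringCompactionPolicy picking a window of HFiles whose sizes are mutually comparable. Below is a rough, simplified sketch of that size-ratio test only; it is not the actual policy code (which also evaluates many candidate windows and weighs file-count limits and off-peak ratios), and the per-file byte sizes in the demo are an illustrative split of the 50294-byte B-store selection reported above.

    // Simplified sketch of a "files in ratio" check over a plain array of HFile
    // sizes, using the compaction ratio (1.2 is the HBase default). A file
    // qualifies only if it is no bigger than ratio * (sum of the other files).
    public final class RatioCheckDemo {
      static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0L;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;   // one file dwarfs the rest; reject this window
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Illustrative split of the 4-file, 50294-byte selection (13.1 K + 3 x 12.0 K).
        long[] sizes = {13415, 12293, 12293, 12293};
        System.out.println(filesInRatio(sizes, 1.2)); // prints true: sizes are comparable
      }
    }
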
2024-12-08T00:22:01,549 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:22:01,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:01,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4426d6dcc7e14edd985ae39e06ae4d74 is 50, key is test_row_0/A:col10/1733617320669/Put/seqid=0 2024-12-08T00:22:01,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742483_1659 (size=12301) 2024-12-08T00:22:01,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:01,621 DEBUG [Thread-2408 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:62287 2024-12-08T00:22:01,621 DEBUG [Thread-2408 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,622 DEBUG [Thread-2412 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:62287 2024-12-08T00:22:01,622 DEBUG [Thread-2412 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,624 DEBUG [Thread-2404 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:62287 2024-12-08T00:22:01,624 DEBUG [Thread-2404 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,626 DEBUG [Thread-2410 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:62287 2024-12-08T00:22:01,626 DEBUG [Thread-2410 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,626 DEBUG [Thread-2406 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:62287 2024-12-08T00:22:01,626 DEBUG [Thread-2406 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,793 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:01,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. as already flushing 2024-12-08T00:22:01,793 DEBUG [Thread-2397 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:62287 2024-12-08T00:22:01,793 DEBUG [Thread-2397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,795 DEBUG [Thread-2399 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:62287 2024-12-08T00:22:01,795 DEBUG [Thread-2399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:01,887 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/70ed1e057d6a47109c8c4a380b27256d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/70ed1e057d6a47109c8c4a380b27256d 2024-12-08T00:22:01,891 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/A of cc3ed2949e0e40ebaa106781844b31d7 into 70ed1e057d6a47109c8c4a380b27256d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:01,891 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:01,891 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/A, priority=12, startTime=1733617321456; duration=0sec 2024-12-08T00:22:01,891 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:01,891 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:A 2024-12-08T00:22:01,907 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/188ed94f58b649e0a107de7aee3e7e32 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/188ed94f58b649e0a107de7aee3e7e32 2024-12-08T00:22:01,911 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc3ed2949e0e40ebaa106781844b31d7/C of cc3ed2949e0e40ebaa106781844b31d7 into 188ed94f58b649e0a107de7aee3e7e32(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
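Editor's note: the flush being executed here (pid=157, a subprocedure of FlushTableProcedure pid=156) was requested from the client side. A minimal sketch of issuing the same kind of table flush through the HBase Admin API follows; the ZooKeeper quorum value is an assumption for a local test cluster like this one.

```java
// Minimal sketch of requesting a table flush from a client. The master then
// schedules a FlushTableProcedure with per-region subprocedures, as seen in
// the surrounding log lines.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // assumption: local test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush all regions of the table.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```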
2024-12-08T00:22:01,911 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:01,911 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7., storeName=cc3ed2949e0e40ebaa106781844b31d7/C, priority=12, startTime=1733617321456; duration=0sec 2024-12-08T00:22:01,911 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:01,911 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc3ed2949e0e40ebaa106781844b31d7:C 2024-12-08T00:22:01,959 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4426d6dcc7e14edd985ae39e06ae4d74 2024-12-08T00:22:01,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/17ef4929a0044d71bb0c6477738a254d is 50, key is test_row_0/B:col10/1733617320669/Put/seqid=0 2024-12-08T00:22:01,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742484_1660 (size=12301) 2024-12-08T00:22:02,368 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/17ef4929a0044d71bb0c6477738a254d 2024-12-08T00:22:02,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/771d950064d54f8c987bfd651a0785aa is 50, key is test_row_0/C:col10/1733617320669/Put/seqid=0 2024-12-08T00:22:02,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742485_1661 (size=12301) 2024-12-08T00:22:02,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:02,777 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/771d950064d54f8c987bfd651a0785aa 2024-12-08T00:22:02,780 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/4426d6dcc7e14edd985ae39e06ae4d74 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4426d6dcc7e14edd985ae39e06ae4d74 2024-12-08T00:22:02,783 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4426d6dcc7e14edd985ae39e06ae4d74, entries=150, sequenceid=521, filesize=12.0 K 2024-12-08T00:22:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/17ef4929a0044d71bb0c6477738a254d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/17ef4929a0044d71bb0c6477738a254d 2024-12-08T00:22:02,786 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/17ef4929a0044d71bb0c6477738a254d, entries=150, sequenceid=521, filesize=12.0 K 2024-12-08T00:22:02,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/771d950064d54f8c987bfd651a0785aa as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/771d950064d54f8c987bfd651a0785aa 2024-12-08T00:22:02,789 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/771d950064d54f8c987bfd651a0785aa, entries=150, sequenceid=521, filesize=12.0 K 2024-12-08T00:22:02,790 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=13.42 KB/13740 for cc3ed2949e0e40ebaa106781844b31d7 in 1241ms, sequenceid=521, compaction requested=false 2024-12-08T00:22:02,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:02,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:02,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-12-08T00:22:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-12-08T00:22:02,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-08T00:22:02,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3090 sec 2024-12-08T00:22:02,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 2.3130 sec 2024-12-08T00:22:04,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-08T00:22:04,586 INFO [Thread-2403 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-12-08T00:22:04,794 DEBUG [Thread-2395 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:62287 2024-12-08T00:22:04,794 DEBUG [Thread-2395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:04,802 DEBUG [Thread-2393 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:62287 2024-12-08T00:22:04,802 DEBUG [Thread-2393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:07,705 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:22:09,680 DEBUG [Thread-2401 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:62287 2024-12-08T00:22:09,680 DEBUG [Thread-2401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2620 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7860 rows 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2642 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7926 rows 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2633 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7895 rows 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2618 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7854 rows 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2634 2024-12-08T00:22:09,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7896 rows 2024-12-08T00:22:09,681 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:22:09,681 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:62287 2024-12-08T00:22:09,681 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:09,683 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T00:22:09,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T00:22:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:09,686 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617329686"}]},"ts":"1733617329686"} 2024-12-08T00:22:09,687 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T00:22:09,690 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T00:22:09,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:22:09,691 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, UNASSIGN}] 2024-12-08T00:22:09,692 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, UNASSIGN 2024-12-08T00:22:09,692 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=cc3ed2949e0e40ebaa106781844b31d7, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:09,693 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:22:09,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:22:09,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:09,844 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:09,845 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing cc3ed2949e0e40ebaa106781844b31d7, disabling compactions & flushes 2024-12-08T00:22:09,845 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. after waiting 0 ms 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
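Editor's note: the disable above fans out as a chain of procedures (DisableTableProcedure pid=158, CloseTableRegionsProcedure pid=159, TransitRegionStateProcedure pid=160, CloseRegionProcedure pid=161) before the region close below begins. A minimal sketch of the client call that starts this chain, under the same assumed local-cluster settings as the earlier sketch:

```java
// Minimal sketch of the client side of the disable seen above: disabling the
// table causes the master to unassign and close its regions via the procedure
// chain logged here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // assumption: local test cluster
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.isTableEnabled(table)) {
                // Synchronous call: returns once the DisableTableProcedure completes.
                admin.disableTable(table);
            }
        }
    }
}
```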
2024-12-08T00:22:09,845 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(2837): Flushing cc3ed2949e0e40ebaa106781844b31d7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=A 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=B 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:09,845 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc3ed2949e0e40ebaa106781844b31d7, store=C 2024-12-08T00:22:09,846 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:09,849 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/713148bcac7148c3a86e5c4659835190 is 50, key is test_row_0/A:col10/1733617324793/Put/seqid=0 2024-12-08T00:22:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742486_1662 (size=12301) 2024-12-08T00:22:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:10,253 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/713148bcac7148c3a86e5c4659835190 2024-12-08T00:22:10,258 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e6aa2e58cca8478eb9cb78572aa13c5a is 50, key is test_row_0/B:col10/1733617324793/Put/seqid=0 2024-12-08T00:22:10,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742487_1663 (size=12301) 2024-12-08T00:22:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:10,661 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 
{event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e6aa2e58cca8478eb9cb78572aa13c5a 2024-12-08T00:22:10,667 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/abc0d63f66cd47c7b57bebc5eb9a1b53 is 50, key is test_row_0/C:col10/1733617324793/Put/seqid=0 2024-12-08T00:22:10,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742488_1664 (size=12301) 2024-12-08T00:22:10,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:11,070 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/abc0d63f66cd47c7b57bebc5eb9a1b53 2024-12-08T00:22:11,073 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/A/713148bcac7148c3a86e5c4659835190 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/713148bcac7148c3a86e5c4659835190 2024-12-08T00:22:11,076 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/713148bcac7148c3a86e5c4659835190, entries=150, sequenceid=531, filesize=12.0 K 2024-12-08T00:22:11,077 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/B/e6aa2e58cca8478eb9cb78572aa13c5a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e6aa2e58cca8478eb9cb78572aa13c5a 2024-12-08T00:22:11,079 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e6aa2e58cca8478eb9cb78572aa13c5a, entries=150, sequenceid=531, filesize=12.0 K 2024-12-08T00:22:11,080 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/.tmp/C/abc0d63f66cd47c7b57bebc5eb9a1b53 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/abc0d63f66cd47c7b57bebc5eb9a1b53 2024-12-08T00:22:11,082 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/abc0d63f66cd47c7b57bebc5eb9a1b53, entries=150, sequenceid=531, filesize=12.0 K 2024-12-08T00:22:11,083 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for cc3ed2949e0e40ebaa106781844b31d7 in 1238ms, sequenceid=531, compaction requested=true 2024-12-08T00:22:11,083 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/9ec1f570bc8148c79d6d13f593983431, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a8493ab76b19410182fc995fe393ba93, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3dd7cd84b6e3425082d67af99c664c07, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/867c171c88eb4e7ab33aa15127ee3480, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/1eda07df78284467bac1db8059449737, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e6bf070b90d14466bba0b4fd264220e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/fad5e1190d6142258a0e354cb2cb99b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/0b411734aa394a94bbbc7c488fef9ec2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1] to archive 2024-12-08T00:22:11,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:22:11,085 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e74ac10d34343b885a947fd1373d67f 2024-12-08T00:22:11,086 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6841e0a9c427495481f7a3780c1d5348 2024-12-08T00:22:11,087 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/d2d8c645962a4086a880d88fc59f9871 2024-12-08T00:22:11,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/9ec1f570bc8148c79d6d13f593983431 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/9ec1f570bc8148c79d6d13f593983431 2024-12-08T00:22:11,089 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/edb3ae3180e640a08a7ee5b3d67ba08a 2024-12-08T00:22:11,090 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7fbd756d75a54744a37657d58814220f 2024-12-08T00:22:11,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e861d123bc834f809b629517e4838f63 2024-12-08T00:22:11,092 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a8493ab76b19410182fc995fe393ba93 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a8493ab76b19410182fc995fe393ba93 2024-12-08T00:22:11,092 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6e932e61f9e2425ea6c4ae2249f2b84a 2024-12-08T00:22:11,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3dd7cd84b6e3425082d67af99c664c07 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3dd7cd84b6e3425082d67af99c664c07 2024-12-08T00:22:11,094 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/6f6f03b235a0492abf688d6d4f73e524 2024-12-08T00:22:11,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/29e2304293394a4393576669541e3170 2024-12-08T00:22:11,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3a271de2808d4e17984938684046a6aa 2024-12-08T00:22:11,096 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/867c171c88eb4e7ab33aa15127ee3480 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/867c171c88eb4e7ab33aa15127ee3480 2024-12-08T00:22:11,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/cbed7f0cb2af4f0f88e2a7fc05caa7ff 2024-12-08T00:22:11,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/15b91b253f50405bb209b402cbc7c48e 2024-12-08T00:22:11,098 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/1eda07df78284467bac1db8059449737 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/1eda07df78284467bac1db8059449737 2024-12-08T00:22:11,099 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a1febb0d373d4835b3651feeeebff1dc 2024-12-08T00:22:11,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/3e7ca6c33a2c4d52901669e3e1d57b10 2024-12-08T00:22:11,100 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e6bf070b90d14466bba0b4fd264220e9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e6bf070b90d14466bba0b4fd264220e9 2024-12-08T00:22:11,101 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/5280b4c6e10a4405a7356c3a9f2cce99 2024-12-08T00:22:11,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/8a00e2e8eee146d0849d7f2902f3d8e7 2024-12-08T00:22:11,102 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e5898f0dc3f44ba0ad5c5b0f8dfdbd50 2024-12-08T00:22:11,103 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/f7773ac0bf204e709884cdd46bfc771c 2024-12-08T00:22:11,104 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a5cb7708a81a4e50a51d235d0a67891f 2024-12-08T00:22:11,105 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/fad5e1190d6142258a0e354cb2cb99b6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/fad5e1190d6142258a0e354cb2cb99b6 2024-12-08T00:22:11,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/c52ea387024243fb918b855001f74737 2024-12-08T00:22:11,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/106b70d4834c413794fe25a7b533f914 2024-12-08T00:22:11,107 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/bb0ce35c4eb04ad89f2b75a59fbe3b8e 2024-12-08T00:22:11,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/a88d92f6f7004fbb8cfc203474618d28 2024-12-08T00:22:11,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/db8924a43b164e789c45285ef34fb8e2 2024-12-08T00:22:11,109 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/385ca4822fab44f2a0675ea08fdf1570 2024-12-08T00:22:11,110 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/0b411734aa394a94bbbc7c488fef9ec2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/0b411734aa394a94bbbc7c488fef9ec2 2024-12-08T00:22:11,110 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/7952e6afbbe94239a5795418e1b1efa9 2024-12-08T00:22:11,111 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4247154164b64c3eb59a64abdd3754b2 2024-12-08T00:22:11,112 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/e3c4bddb45614bf79429c9f706a261c1 2024-12-08T00:22:11,113 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/89cfa2deff004ca1875dced7411e4da4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c8f31cf51983415bab832755f06eb4c9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/dceed96ee6e14023b11b68dd742eb401, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/0bb5e73814884ae7a728a281702286bd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/368ecbe6a18a48b8936114aacb43b8d1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f8a6a5cb40f74ef694999192a8c59f9f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ade30619a5b64c14a770fda7a2763835, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/79914c6b9a204b4fa52a272a2cb6f881, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f16d65167eca4ff1b526a624688601ac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c77e651b081147f1848b997b4af5f857, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe] to archive 2024-12-08T00:22:11,114 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
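The batch above shows the StoreCloser handing HFileArchiver every compacted file for column family B, and the per-file entries that follow show each one landing under the same relative path with "archive/" spliced in right after the test-data root. A minimal sketch of that mapping, inferred only from the source/destination pairs printed in this log (the helper name expected_archive_path is an illustration, not HBase code):

# Illustrative sketch only: derives the archive location that the entries above
# report for each store file. Not HBase code; the mapping is inferred purely from
# the source/destination pairs printed by HFileArchiver in this log.
def expected_archive_path(data_path: str) -> str:
    # In the log, ".../<root>/data/default/<table>/<region>/<family>/<hfile>"
    # is archived to ".../<root>/archive/data/default/<table>/<region>/<family>/<hfile>".
    marker = "/data/default/"
    root, _, rest = data_path.partition(marker)
    if not rest:
        raise ValueError("not a store-file path under /data/default/: " + data_path)
    return root + "/archive" + marker + rest

if __name__ == "__main__":
    src = ("hdfs://localhost:46183/user/jenkins/test-data/"
           "93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/"
           "cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0")
    print(expected_archive_path(src))  # prints the archive path seen in the log above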
2024-12-08T00:22:11,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c7a6bbb984204ae8bfab7d8976062ce0 2024-12-08T00:22:11,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d93a9d374ca94c33a31ab37cfde4543a 2024-12-08T00:22:11,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/89cfa2deff004ca1875dced7411e4da4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/89cfa2deff004ca1875dced7411e4da4 2024-12-08T00:22:11,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/6d8f3d193fc04814989cd6bbea9308af 2024-12-08T00:22:11,119 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/25b11440b897458baabf93b199b3968b 2024-12-08T00:22:11,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/4eeffd78402941be8e55203a2a9075d9 2024-12-08T00:22:11,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c8f31cf51983415bab832755f06eb4c9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c8f31cf51983415bab832755f06eb4c9 2024-12-08T00:22:11,122 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/24f6670016b34f3cb1d01a0bca01c514 2024-12-08T00:22:11,122 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/41bd9df4308148a1ba2a8b4bb21157e5 2024-12-08T00:22:11,123 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/dceed96ee6e14023b11b68dd742eb401 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/dceed96ee6e14023b11b68dd742eb401 2024-12-08T00:22:11,124 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/600c6e2adbcc4086b7306b1a2898dc8c 2024-12-08T00:22:11,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/86b83687f5cf4fa1bfe6268ad55f915f 2024-12-08T00:22:11,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/0bb5e73814884ae7a728a281702286bd to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/0bb5e73814884ae7a728a281702286bd 2024-12-08T00:22:11,126 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ab48a7f7b21446b1918c41c7f67e7cd7 2024-12-08T00:22:11,127 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/76c2784e183349168258245561ddc22d 2024-12-08T00:22:11,128 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/368ecbe6a18a48b8936114aacb43b8d1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/368ecbe6a18a48b8936114aacb43b8d1 2024-12-08T00:22:11,129 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a138c98f153749508d119ee19943633b 2024-12-08T00:22:11,129 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/d24a3ad468424338a70b6e04c4606ddf 2024-12-08T00:22:11,130 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f8a6a5cb40f74ef694999192a8c59f9f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f8a6a5cb40f74ef694999192a8c59f9f 2024-12-08T00:22:11,131 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/bd55b8bb33fa4fa69cf8d8974b21ed9d 2024-12-08T00:22:11,132 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ca2466d52c0240a4a9d3c940c561a13d 2024-12-08T00:22:11,132 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ade30619a5b64c14a770fda7a2763835 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/ade30619a5b64c14a770fda7a2763835 2024-12-08T00:22:11,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/13488325e624444181742d8691843154 2024-12-08T00:22:11,134 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e558dfeea2584fc0a2ad39478f499fa7 2024-12-08T00:22:11,135 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/79914c6b9a204b4fa52a272a2cb6f881 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/79914c6b9a204b4fa52a272a2cb6f881 2024-12-08T00:22:11,135 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f4c572b443a24d4192a6ed0ba3baf51a 2024-12-08T00:22:11,136 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/655fdb799db24e7db7a4445d153e4a95 2024-12-08T00:22:11,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f16d65167eca4ff1b526a624688601ac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/f16d65167eca4ff1b526a624688601ac 2024-12-08T00:22:11,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/35ec2a4e3c404f169450a9132d07fcb1 2024-12-08T00:22:11,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/201abbd9ccac4b468fc50cd226996248 2024-12-08T00:22:11,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a46f5dd4e99643a7b7f594701232a538 2024-12-08T00:22:11,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c77e651b081147f1848b997b4af5f857 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/c77e651b081147f1848b997b4af5f857 2024-12-08T00:22:11,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/a5c4eb0b4a9a46bdad260f1f8cab23fa 2024-12-08T00:22:11,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/90489a5b2c05483ab3705f1c4659a820 2024-12-08T00:22:11,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/53714fdb6b2344cfa638dc6bd66150e9 2024-12-08T00:22:11,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/048c5f75350446aa81f53554cc089cfe 2024-12-08T00:22:11,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/253a4cbfc8614e619dcf355c934048af, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/59acd7f495bc48b8ae7d9ff0fec560da, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/df8c88d68f394d50a500b55dc67ff903, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6bfd37ba71dc4e99942b3c872998c4b1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c34d976733d94595883e752a35be6a84, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9b30491cb81e4eef9f20300fc849dff4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/dc788bd633354034b83c75248e78bd34, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/eacbd958f01443c9b1fa6ce9f2bef0a8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f9455a6dc0424114928db382b6c3eb74, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/0f6d1bd9a7ea48be802442645e62e08f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566] to archive 2024-12-08T00:22:11,145 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:22:11,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/72f959ce6eb9417d92e96a0a764212af 2024-12-08T00:22:11,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c6504c608b87439e94d2d4cee64a0b2d 2024-12-08T00:22:11,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/253a4cbfc8614e619dcf355c934048af to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/253a4cbfc8614e619dcf355c934048af 2024-12-08T00:22:11,148 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6d960b888ad74a1f87b71144a83630ea 2024-12-08T00:22:11,149 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/cec338acf293420a9d5a55595d2e673b 2024-12-08T00:22:11,150 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1328c28560be4450b74b402482cbd3df 2024-12-08T00:22:11,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/59acd7f495bc48b8ae7d9ff0fec560da to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/59acd7f495bc48b8ae7d9ff0fec560da 2024-12-08T00:22:11,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2321f534a4348e48055d87122c8a4eb 2024-12-08T00:22:11,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/7182fabf4980437ba10f29bab95911a5 2024-12-08T00:22:11,153 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/df8c88d68f394d50a500b55dc67ff903 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/df8c88d68f394d50a500b55dc67ff903 2024-12-08T00:22:11,154 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/268ba11eff7640858c71282975400036 2024-12-08T00:22:11,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/28600b4e0e4649f5ba012303553b5881 2024-12-08T00:22:11,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6bfd37ba71dc4e99942b3c872998c4b1 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6bfd37ba71dc4e99942b3c872998c4b1 2024-12-08T00:22:11,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/1ba072f695804f55b907c986af702435 2024-12-08T00:22:11,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/6f460221997942e981a11fdfa6ff542b 2024-12-08T00:22:11,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c34d976733d94595883e752a35be6a84 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c34d976733d94595883e752a35be6a84 2024-12-08T00:22:11,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f89d084884e9435aba23ad48a06f5fc6 2024-12-08T00:22:11,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/77e2bdf1d80a4b2f8f33723bb15121d4 2024-12-08T00:22:11,160 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9b30491cb81e4eef9f20300fc849dff4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9b30491cb81e4eef9f20300fc849dff4 2024-12-08T00:22:11,160 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/2554bd4b9061459da1c619deb0848746 2024-12-08T00:22:11,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/18b753250d7a44b98385bf925d42d5cf 2024-12-08T00:22:11,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/dc788bd633354034b83c75248e78bd34 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/dc788bd633354034b83c75248e78bd34 2024-12-08T00:22:11,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/393393e381814adb8a7a8c7f8f7b9089 2024-12-08T00:22:11,163 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/9179d79d08894e5c9fdcfe8cb82cb720 2024-12-08T00:22:11,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/eacbd958f01443c9b1fa6ce9f2bef0a8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/eacbd958f01443c9b1fa6ce9f2bef0a8 2024-12-08T00:22:11,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/a0dd03e6891c4e45b6adfb521c315777 2024-12-08T00:22:11,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/c39ce207801b4d90a70a6912dec3cc82 2024-12-08T00:22:11,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f9455a6dc0424114928db382b6c3eb74 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/f9455a6dc0424114928db382b6c3eb74 2024-12-08T00:22:11,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/5fd6951b359447cf8dcd3e6dfef9cd4f 2024-12-08T00:22:11,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/4719e789c6bd437088d4473c8ff0987e 2024-12-08T00:22:11,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/b9c0ec31999843d287b64e08be33e6f8 2024-12-08T00:22:11,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/0f6d1bd9a7ea48be802442645e62e08f to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/0f6d1bd9a7ea48be802442645e62e08f 2024-12-08T00:22:11,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/52dbe59261a3438b9b8d7c5fb37ec2dc 2024-12-08T00:22:11,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/d2ed537c87c24075adc5f0ace9a28e8b 2024-12-08T00:22:11,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/115441d1f3f1499eaeb13e4c72ded4f2 2024-12-08T00:22:11,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/8ce305649f824644bb2e6f1e10ce4566 2024-12-08T00:22:11,175 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/recovered.edits/534.seqid, newMaxSeqId=534, maxSeqId=1 2024-12-08T00:22:11,176 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7. 
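With all three stores (A, B and C) archived, the close finishes by writing recovered.edits/534.seqid (matching newMaxSeqId=534) and HRegion reports the region closed. If this output is saved to a file, a small cross-check can confirm that every file listed in a "Moving the files [...] to archive" batch also appears in a later "Archived from ..." entry. This is only a sketch: the file name test-output.log and the assumption of one log entry per line are both assumptions, not anything produced by the test itself.

# Illustrative sketch, not part of the test run: cross-checks the HStore batches
# against the individual HFileArchiver entries in a saved copy of this log.
import re

MOVING = re.compile(r"Moving the files \[(?P<files>.*?)\] to archive")
ARCHIVED = re.compile(r"Archived from \S+, (?P<src>hdfs://\S+) to hdfs://\S+")

def unarchived_files(log_file: str) -> set:
    listed, archived = set(), set()
    with open(log_file, encoding="utf-8") as fh:
        for line in fh:
            m = MOVING.search(line)
            if m:
                listed.update(p.strip() for p in m.group("files").split(","))
            m = ARCHIVED.search(line)
            if m:
                archived.add(m.group("src"))
    return listed - archived  # empty set if every listed file was archived

if __name__ == "__main__":
    print(unarchived_files("test-output.log"))  # assumed file name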
2024-12-08T00:22:11,176 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for cc3ed2949e0e40ebaa106781844b31d7: 2024-12-08T00:22:11,177 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:11,178 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=cc3ed2949e0e40ebaa106781844b31d7, regionState=CLOSED 2024-12-08T00:22:11,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-08T00:22:11,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure cc3ed2949e0e40ebaa106781844b31d7, server=017dd09fb407,36703,1733617179335 in 1.4850 sec 2024-12-08T00:22:11,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-08T00:22:11,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc3ed2949e0e40ebaa106781844b31d7, UNASSIGN in 1.4880 sec 2024-12-08T00:22:11,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-08T00:22:11,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4910 sec 2024-12-08T00:22:11,182 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617331182"}]},"ts":"1733617331182"} 2024-12-08T00:22:11,183 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:22:11,185 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:22:11,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5020 sec 2024-12-08T00:22:11,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T00:22:11,790 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-08T00:22:11,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T00:22:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,791 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-08T00:22:11,792 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=162, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,793 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:11,795 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/recovered.edits] 2024-12-08T00:22:11,797 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4426d6dcc7e14edd985ae39e06ae4d74 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/4426d6dcc7e14edd985ae39e06ae4d74 2024-12-08T00:22:11,797 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/70ed1e057d6a47109c8c4a380b27256d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/70ed1e057d6a47109c8c4a380b27256d 2024-12-08T00:22:11,798 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/713148bcac7148c3a86e5c4659835190 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/A/713148bcac7148c3a86e5c4659835190 2024-12-08T00:22:11,800 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/17ef4929a0044d71bb0c6477738a254d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/17ef4929a0044d71bb0c6477738a254d 2024-12-08T00:22:11,801 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/1aa7efc745464d779515f1059ddab0bd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/1aa7efc745464d779515f1059ddab0bd 
2024-12-08T00:22:11,802 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e6aa2e58cca8478eb9cb78572aa13c5a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/B/e6aa2e58cca8478eb9cb78572aa13c5a 2024-12-08T00:22:11,803 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/188ed94f58b649e0a107de7aee3e7e32 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/188ed94f58b649e0a107de7aee3e7e32 2024-12-08T00:22:11,804 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/771d950064d54f8c987bfd651a0785aa to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/771d950064d54f8c987bfd651a0785aa 2024-12-08T00:22:11,805 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/abc0d63f66cd47c7b57bebc5eb9a1b53 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/C/abc0d63f66cd47c7b57bebc5eb9a1b53 2024-12-08T00:22:11,807 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/recovered.edits/534.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7/recovered.edits/534.seqid 2024-12-08T00:22:11,807 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/cc3ed2949e0e40ebaa106781844b31d7 2024-12-08T00:22:11,807 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:22:11,809 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=162, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,810 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:22:11,812 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
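The archival entries above are the server side of the test's teardown: DisableTableProcedure pid=158 has already completed, and DeleteTableProcedure pid=162 moves every store file of region cc3ed2949e0e40ebaa106781844b31d7 under archive/data/default/ before the region and table-state rows are removed from hbase:meta just below. A minimal, hedged sketch of the client calls that drive this cycle (the configuration setup and class name here are assumptions, not lifted from the test source):

    // Illustrative only: disable + delete the table through the public Admin API.
    // Deleting archives store files rather than destroying them, which is what the
    // HFileArchiver entries above show.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);  // DisableTableProcedure (pid=158 above)
            admin.deleteTable(tn);   // DeleteTableProcedure (pid=162); finishes just below
          }
        }
      }
    }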
2024-12-08T00:22:11,812 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=162, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,812 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:22:11,812 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617331812"}]},"ts":"9223372036854775807"} 2024-12-08T00:22:11,814 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:22:11,814 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cc3ed2949e0e40ebaa106781844b31d7, NAME => 'TestAcidGuarantees,,1733617299450.cc3ed2949e0e40ebaa106781844b31d7.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:22:11,814 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-08T00:22:11,814 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617331814"}]},"ts":"9223372036854775807"} 2024-12-08T00:22:11,815 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:22:11,817 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=162, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-12-08T00:22:11,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-08T00:22:11,893 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 162 completed 2024-12-08T00:22:11,902 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237 (was 240), OpenFileDescriptor=448 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=357 (was 410), ProcessCount=11 (was 11), AvailableMemoryMB=7531 (was 7556) 2024-12-08T00:22:11,910 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=357, ProcessCount=11, AvailableMemoryMB=7531 2024-12-08T00:22:11,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
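The MEMSTORE_FLUSHSIZE warning just above is expected here: the table descriptor carries hbase.hregion.memstore.flush.size = 131072 (128 KB), presumably to force very frequent flushing while the ACID checks run, and the create request that follows (pid=163) adds three families plus the ADAPTIVE compacting-memstore attribute. A hedged sketch of building an equivalent descriptor with the public client API (the helper class and method names are assumptions, not the test's actual code):

    // Illustrative sketch of a descriptor matching the create request below:
    // families A/B/C, ADAPTIVE in-memory compaction as a table-level attribute,
    // and the deliberately small 128 KB flush size that triggers the warning above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAdaptiveTable {
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata seen in the create request below.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                // 131072 bytes = 128 KB; small enough to make flushes fire constantly.
                .setMemStoreFlushSize(128 * 1024);
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)  // VERSIONS => '1' in the descriptor
                  .build());
        }
        admin.createTable(builder.build());
      }
    }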
2024-12-08T00:22:11,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:22:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:11,913 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:22:11,913 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:11,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 163 2024-12-08T00:22:11,914 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:22:11,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:11,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742489_1665 (size=963) 2024-12-08T00:22:12,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:12,320 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3 2024-12-08T00:22:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742490_1666 (size=53) 2024-12-08T00:22:12,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8f0d77d608530d497fe4f44ffdd89312, disabling compactions & flushes 2024-12-08T00:22:12,725 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. after waiting 0 ms 2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:12,725 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:12,725 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:12,726 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:22:12,726 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733617332726"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733617332726"}]},"ts":"1733617332726"} 2024-12-08T00:22:12,727 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T00:22:12,727 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:22:12,728 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617332727"}]},"ts":"1733617332727"} 2024-12-08T00:22:12,728 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T00:22:12,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, ASSIGN}] 2024-12-08T00:22:12,733 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, ASSIGN 2024-12-08T00:22:12,733 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, ASSIGN; state=OFFLINE, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=false 2024-12-08T00:22:12,884 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:12,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:22:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:13,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:13,039 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:13,039 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:22:13,039 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,039 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:22:13,039 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,039 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,040 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,041 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:13,042 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName A 2024-12-08T00:22:13,042 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:13,042 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:13,042 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,043 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:13,043 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName B 2024-12-08T00:22:13,043 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:13,043 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:13,044 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,044 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:13,044 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName C 2024-12-08T00:22:13,044 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:13,045 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:13,045 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:13,045 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,045 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,047 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:22:13,047 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:13,049 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:22:13,049 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 8f0d77d608530d497fe4f44ffdd89312; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67826021, jitterRate=0.010686472058296204}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:22:13,049 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:13,050 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., pid=165, masterSystemTime=1733617333036 2024-12-08T00:22:13,051 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:13,051 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
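The StoreOpener lines above show each of the three families coming up with a CompactingMemStore whose compactor is ADAPTIVE, inherited from the table-level hbase.hregion.compacting.memstore.type attribute, with an in-memory flush threshold of 2.00 MB and pipelineThreshold=2. A small, hedged way to read that attribute back through the client API, should one want to assert it in a test (the helper below is an assumption, not part of TestAcidGuarantees):

    // Illustrative check: read the table-level compacting-memstore attribute back
    // from the descriptor that the StoreOpener lines above are reporting.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class VerifyAdaptiveMemstore {
      static boolean isAdaptive(Admin admin) throws java.io.IOException {
        TableDescriptor desc = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"));
        return "ADAPTIVE".equals(desc.getValue("hbase.hregion.compacting.memstore.type"));
      }
    }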
2024-12-08T00:22:13,051 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=OPEN, openSeqNum=2, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:13,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-08T00:22:13,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 in 167 msec 2024-12-08T00:22:13,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-08T00:22:13,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, ASSIGN in 321 msec 2024-12-08T00:22:13,055 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:22:13,055 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617333055"}]},"ts":"1733617333055"} 2024-12-08T00:22:13,055 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T00:22:13,058 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:22:13,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-12-08T00:22:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-08T00:22:14,018 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-08T00:22:14,019 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5765d46a to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d9954b7 2024-12-08T00:22:14,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb684eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,026 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:14,027 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:14,028 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:22:14,029 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58520, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:22:14,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T00:22:14,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:22:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=166, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:14,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742491_1667 (size=999) 2024-12-08T00:22:14,440 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T00:22:14,440 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T00:22:14,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:22:14,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, REOPEN/MOVE}] 2024-12-08T00:22:14,443 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, REOPEN/MOVE 2024-12-08T00:22:14,444 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:14,445 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:22:14,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE; CloseRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:22:14,596 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:14,596 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(124): Close 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,596 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:22:14,596 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1681): Closing 8f0d77d608530d497fe4f44ffdd89312, disabling compactions & flushes 2024-12-08T00:22:14,596 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:14,596 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:14,596 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. after waiting 0 ms 2024-12-08T00:22:14,596 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
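The close/reopen of 8f0d77d608530d497fe4f44ffdd89312 here is driven by the modify request logged just before it: family A becomes a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', so cell values larger than 4 bytes will be written to MOB files, and a schema change of that kind requires the region to be reopened, hence the REOPEN/MOVE TransitRegionStateProcedure. The test issues a full modifyTable with the rewritten descriptor; an equivalent, narrower change through the public Admin API would look roughly like this (variable and class names are assumptions):

    // Illustrative: enable MOB on family 'A' with a 4-byte threshold, matching the
    // IS_MOB/MOB_THRESHOLD values in the modify request above. Altering a family
    // descriptor reopens the table's regions, which is what this part of the log shows.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobOnFamilyA {
      static void enableMob(Admin admin) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        ColumnFamilyDescriptor current =
            admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A"));
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder.newBuilder(current)
            .setMobEnabled(true)  // IS_MOB => 'true'
            .setMobThreshold(4L)  // MOB_THRESHOLD => '4'
            .build();
        admin.modifyColumnFamily(tn, mobFamily);
      }
    }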
2024-12-08T00:22:14,600 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T00:22:14,600 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:14,600 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1635): Region close journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:14,600 WARN [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegionServer(3786): Not adding moved region record: 8f0d77d608530d497fe4f44ffdd89312 to self. 2024-12-08T00:22:14,601 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(170): Closed 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,602 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=CLOSED 2024-12-08T00:22:14,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-08T00:22:14,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; CloseRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 in 157 msec 2024-12-08T00:22:14,604 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, REOPEN/MOVE; state=CLOSED, location=017dd09fb407,36703,1733617179335; forceNewPlan=false, retain=true 2024-12-08T00:22:14,754 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=OPENING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:14,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=168, state=RUNNABLE; OpenRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:22:14,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:14,909 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:14,909 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7285): Opening region: {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:22:14,909 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,909 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:22:14,910 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7327): checking encryption for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,910 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7330): checking classloading for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,911 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,911 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:14,911 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName A 2024-12-08T00:22:14,912 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:14,913 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:14,913 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,913 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:14,914 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName B 2024-12-08T00:22:14,914 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:14,914 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:14,914 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,914 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T00:22:14,915 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f0d77d608530d497fe4f44ffdd89312 columnFamilyName C 2024-12-08T00:22:14,915 DEBUG [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:14,915 INFO [StoreOpener-8f0d77d608530d497fe4f44ffdd89312-1 {}] regionserver.HStore(327): Store=8f0d77d608530d497fe4f44ffdd89312/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:22:14,915 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:14,915 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,916 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,917 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T00:22:14,918 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1085): writing seq id for 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:14,919 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1102): Opened 8f0d77d608530d497fe4f44ffdd89312; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63549837, jitterRate=-0.05303363502025604}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T00:22:14,919 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1001): Region open journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:14,920 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., pid=170, masterSystemTime=1733617334907 2024-12-08T00:22:14,921 DEBUG [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:14,921 INFO [RS_OPEN_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
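Once the region is back open with the MOB-enabled descriptor, the ModifyTableProcedure and its remaining subprocedures (pids 170, 168, 167 and finally 166) finish just below, and the test then opens a batch of client connections, each with its own ReadOnlyZKClient session, before asking the master to flush the table (FlushTableProcedure pid=171). A hedged sketch of that client-side pattern, with the connection count and names assumed rather than taken from the test harness:

    // Illustrative only: one Connection per worker plus an explicit table flush,
    // roughly matching the burst of ReadOnlyZKClient connects and the
    // FlushTableProcedure that follow in the log.
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class WorkerConnections {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        List<Connection> workers = new ArrayList<>();
        try {
          // Each writer/reader gets its own Connection and therefore its own
          // ZooKeeper client, consistent with the repeated "Connect 0x..." lines.
          for (int i = 0; i < 10; i++) {
            workers.add(ConnectionFactory.createConnection(conf));
          }
          try (Admin admin = workers.get(0).getAdmin()) {
            admin.flush(tn);  // appears as FlushTableProcedure (pid=171) on the master
          }
        } finally {
          for (Connection c : workers) {
            c.close();
          }
        }
      }
    }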
2024-12-08T00:22:14,921 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=OPEN, openSeqNum=5, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:14,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=168 2024-12-08T00:22:14,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=168, state=SUCCESS; OpenRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 in 167 msec 2024-12-08T00:22:14,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-08T00:22:14,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, REOPEN/MOVE in 480 msec 2024-12-08T00:22:14,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-12-08T00:22:14,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-12-08T00:22:14,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-12-08T00:22:14,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=166 2024-12-08T00:22:14,928 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ac53e79 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d5efb7a 2024-12-08T00:22:14,931 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644b7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,932 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bc9c3e to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fc332d8 2024-12-08T00:22:14,940 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9b5141, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7181df3b to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17327621 2024-12-08T00:22:14,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a52cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,945 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11030ef5 
to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1584f18a 2024-12-08T00:22:14,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7fe431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,950 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69abefea to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b914bf4 2024-12-08T00:22:14,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91d72db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,954 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-12-08T00:22:14,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-12-08T00:22:14,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,965 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-12-08T00:22:14,969 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cfdf76c to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6556601 2024-12-08T00:22:14,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8cd1ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,974 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x68c2838a to 127.0.0.1:62287 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@458a85fd 2024-12-08T00:22:14,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d832d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:22:14,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-08T00:22:14,980 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:14,980 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:14,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:14,987 DEBUG [hconnection-0x23799977-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:14,988 DEBUG [hconnection-0x3efb0e8c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:14,988 DEBUG [hconnection-0x70eea75-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:14,989 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:14,989 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:14,989 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50104, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:15,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, 
store=A 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:15,005 DEBUG [hconnection-0x3fa78d92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,006 DEBUG [hconnection-0x31fec9b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,006 DEBUG [hconnection-0x70da409a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,006 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,007 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,007 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50162, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,010 DEBUG [hconnection-0x2865b9ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,011 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,024 DEBUG [hconnection-0xe6a30f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,025 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,028 DEBUG [hconnection-0xa284c0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,029 DEBUG [hconnection-0x740b8b54-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:22:15,029 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,030 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:22:15,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120880144989d03740578fc18a21c70dd4ef_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617334995/Put/seqid=0 2024-12-08T00:22:15,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742492_1668 (size=12154) 2024-12-08T00:22:15,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617395054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617395056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617395056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617395057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617395057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:15,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
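[Editor's note] The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above are raised server-side in HRegion.checkResources when a region's memstore has grown past its blocking threshold while a flush is in progress; callers are expected to back off and retry. The following is a minimal illustrative sketch of such a client-side retry loop, not code from the test itself; the table name, row, column family, and backoff values are assumptions chosen to mirror the log, and it assumes the exception propagates to the caller rather than being absorbed by client-side retries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // assumed starting backoff, not taken from the test
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put); // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The region is flushing and its memstore exceeds the blocking size
          // (flush size times the block multiplier); wait and retry instead of failing.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}

If all attempts fail, a real client would surface the exception; the sketch omits that for brevity.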
2024-12-08T00:22:15,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617395157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617395160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617395160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617395160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617395160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:15,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
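[Editor's note] Earlier in this run the master stores pid=171 for "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and the caller repeatedly polls "Checking to see if procedure is done pid=171", while the flush subprocedure (pid=172) keeps failing with "NOT flushing ... as already flushing" and is re-dispatched. For reference, a minimal sketch of how such an administrative flush is typically issued through the public Admin API follows; it is a hypothetical standalone snippet, not code from TestAcidGuarantees, and it assumes a reachable cluster configured via the default HBase configuration on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // In this HBase version the flush is driven by a master-side FlushTableProcedure,
      // as the log shows (pid=171); the client waits for the procedure to finish,
      // which corresponds to the "Checking to see if procedure is done" polling above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}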
2024-12-08T00:22:15,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617395360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617395362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617395362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617395363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617395363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,437 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,437 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:15,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
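[Editor's note] The 512.0 K blocking limit seen in these rejections is the product of the region's configured memstore flush size and the block multiplier. The sketch below only illustrates the relevant configuration keys; the concrete values (128 KB flush size times a multiplier of 4, giving the 512 KB limit in the log) are an assumption about how a test like this lowers the defaults, not values read from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches ~128 KB (the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore reaches 4x the flush size, i.e. 512 KB,
    // matching the "Over memstore limit=512.0 K" threshold reported in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}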
2024-12-08T00:22:15,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,441 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120880144989d03740578fc18a21c70dd4ef_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120880144989d03740578fc18a21c70dd4ef_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:15,445 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4529900537b94db9aab1f3fe9c306f41, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:15,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4529900537b94db9aab1f3fe9c306f41 is 175, key is test_row_0/A:col10/1733617334995/Put/seqid=0 2024-12-08T00:22:15,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742493_1669 (size=30955) 2024-12-08T00:22:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:15,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617395663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617395665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617395665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617395665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617395665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,875 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4529900537b94db9aab1f3fe9c306f41 2024-12-08T00:22:15,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:15,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:15,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:15,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:15,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:15,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:15,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/149f6c5aad8c4ab3a2ec22771afe4276 is 50, key is test_row_0/B:col10/1733617334995/Put/seqid=0 2024-12-08T00:22:15,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742494_1670 (size=12001) 2024-12-08T00:22:15,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/149f6c5aad8c4ab3a2ec22771afe4276 2024-12-08T00:22:15,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/6e038cb800be4b638348d8254525ae9b is 50, key is test_row_0/C:col10/1733617334995/Put/seqid=0 2024-12-08T00:22:15,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742495_1671 (size=12001) 2024-12-08T00:22:16,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:16,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:16,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:16,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:16,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:16,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617396165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:16,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617396167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:16,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617396169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:16,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617396170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:16,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617396170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:16,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:16,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:16,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:16,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/6e038cb800be4b638348d8254525ae9b 2024-12-08T00:22:16,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4529900537b94db9aab1f3fe9c306f41 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41 2024-12-08T00:22:16,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41, entries=150, sequenceid=15, filesize=30.2 K 2024-12-08T00:22:16,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/149f6c5aad8c4ab3a2ec22771afe4276 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276 2024-12-08T00:22:16,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276, entries=150, sequenceid=15, 
filesize=11.7 K 2024-12-08T00:22:16,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/6e038cb800be4b638348d8254525ae9b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b 2024-12-08T00:22:16,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T00:22:16,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 8f0d77d608530d497fe4f44ffdd89312 in 1393ms, sequenceid=15, compaction requested=false 2024-12-08T00:22:16,393 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-08T00:22:16,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:16,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:16,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:16,513 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:16,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:16,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208e43c6659ef9b4ed78d826346e75bae4b_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617335052/Put/seqid=0 2024-12-08T00:22:16,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742496_1672 (size=12154) 2024-12-08T00:22:16,698 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T00:22:16,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:16,947 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208e43c6659ef9b4ed78d826346e75bae4b_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208e43c6659ef9b4ed78d826346e75bae4b_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4b916f9b67c04fe0b5996cacc56d2559, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:16,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4b916f9b67c04fe0b5996cacc56d2559 is 175, key is test_row_0/A:col10/1733617335052/Put/seqid=0 2024-12-08T00:22:16,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742497_1673 (size=30955) 2024-12-08T00:22:17,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:17,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:17,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617397175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617397175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617397177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617397179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617397181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617397278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617397279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617397280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,353 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4b916f9b67c04fe0b5996cacc56d2559 2024-12-08T00:22:17,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/7892de1ae6d04801ad3f643cf7af3bd7 is 50, key is test_row_0/B:col10/1733617335052/Put/seqid=0 2024-12-08T00:22:17,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742498_1674 (size=12001) 2024-12-08T00:22:17,398 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/7892de1ae6d04801ad3f643cf7af3bd7 2024-12-08T00:22:17,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/1532fdc91e024c1190826ca5728b2759 is 50, key is test_row_0/C:col10/1733617335052/Put/seqid=0 2024-12-08T00:22:17,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742499_1675 (size=12001) 2024-12-08T00:22:17,409 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/1532fdc91e024c1190826ca5728b2759 2024-12-08T00:22:17,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4b916f9b67c04fe0b5996cacc56d2559 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559 2024-12-08T00:22:17,416 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559, entries=150, sequenceid=42, filesize=30.2 K 2024-12-08T00:22:17,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/7892de1ae6d04801ad3f643cf7af3bd7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7 2024-12-08T00:22:17,420 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T00:22:17,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/1532fdc91e024c1190826ca5728b2759 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759 2024-12-08T00:22:17,424 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T00:22:17,424 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8f0d77d608530d497fe4f44ffdd89312 in 911ms, sequenceid=42, compaction requested=false 2024-12-08T00:22:17,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:17,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:17,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-08T00:22:17,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-08T00:22:17,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-08T00:22:17,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4460 sec 2024-12-08T00:22:17,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.4490 sec 2024-12-08T00:22:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:17,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:17,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:17,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083f855968f4254b549d7bf1a087d8faf5_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:17,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742500_1676 (size=12154) 2024-12-08T00:22:17,495 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:17,498 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083f855968f4254b549d7bf1a087d8faf5_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083f855968f4254b549d7bf1a087d8faf5_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:17,498 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/af00e3df741e4e309fe8d7a4c74a0fe4, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:17,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/af00e3df741e4e309fe8d7a4c74a0fe4 is 175, key is test_row_0/A:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:17,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742501_1677 (size=30955) 2024-12-08T00:22:17,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617397529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617397528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617397529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617397633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617397633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617397634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617397836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617397836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617397836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:17,907 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/af00e3df741e4e309fe8d7a4c74a0fe4 2024-12-08T00:22:17,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/d4844ba881b944ebae04b11c586d0788 is 50, key is test_row_0/B:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:17,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742502_1678 (size=12001) 2024-12-08T00:22:17,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/d4844ba881b944ebae04b11c586d0788 2024-12-08T00:22:17,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/cc6776a0821c4816afdcfa6520a04205 is 50, key is test_row_0/C:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742503_1679 (size=12001) 2024-12-08T00:22:17,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/cc6776a0821c4816afdcfa6520a04205 2024-12-08T00:22:17,959 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/af00e3df741e4e309fe8d7a4c74a0fe4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4 2024-12-08T00:22:17,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4, entries=150, sequenceid=54, filesize=30.2 K 2024-12-08T00:22:17,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/d4844ba881b944ebae04b11c586d0788 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788 2024-12-08T00:22:17,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T00:22:17,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/cc6776a0821c4816afdcfa6520a04205 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205 2024-12-08T00:22:17,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T00:22:17,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8f0d77d608530d497fe4f44ffdd89312 in 493ms, sequenceid=54, compaction requested=true 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:17,978 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:17,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T00:22:17,978 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:17,979 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:17,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:17,979 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:17,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:17,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:17,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:17,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=90.7 K 2024-12-08T00:22:17,979 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=35.2 K 2024-12-08T00:22:17,979 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:17,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4] 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4529900537b94db9aab1f3fe9c306f41, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617334995 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 149f6c5aad8c4ab3a2ec22771afe4276, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617334995 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7892de1ae6d04801ad3f643cf7af3bd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617335052 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b916f9b67c04fe0b5996cacc56d2559, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617335052 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d4844ba881b944ebae04b11c586d0788, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176 2024-12-08T00:22:17,980 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting af00e3df741e4e309fe8d7a4c74a0fe4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176 2024-12-08T00:22:18,000 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:18,003 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#579 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:18,004 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/ff281ce7e7734a03a21ccd1517c49d31 is 50, key is test_row_0/B:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:18,020 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208f79744eac0cc4135ae79efe01c214054_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:18,024 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208f79744eac0cc4135ae79efe01c214054_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:18,024 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f79744eac0cc4135ae79efe01c214054_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:18,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742504_1680 (size=12104) 2024-12-08T00:22:18,066 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/ff281ce7e7734a03a21ccd1517c49d31 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ff281ce7e7734a03a21ccd1517c49d31 2024-12-08T00:22:18,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742505_1681 (size=4469) 2024-12-08T00:22:18,070 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#578 average throughput is 0.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:18,071 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6eb167abc5724f94b3eee1e92baa904b is 175, key is test_row_0/A:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:18,072 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into ff281ce7e7734a03a21ccd1517c49d31(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
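[Annotation, not part of the log] Family A is compacted through mob.DefaultMobStoreCompactor, and the freshly created MOB writer is aborted because no cell crosses the MOB threshold, which indicates the test declared column family A as MOB-enabled. Below is a hedged sketch of how such a family can be declared with the HBase 2.x descriptor builders; the 100 KB threshold and the single-family layout are illustrative assumptions, not values read from the test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      // Build a descriptor whose 'A' family stores oversized cells as MOB files.
      static TableDescriptor mobEnabledTable() {
        ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)           // route cells above the threshold through the MOB store
            .setMobThreshold(100 * 1024L)  // illustrative 100 KB cut-off (assumption)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(a)
            .build();
      }
    }

With a descriptor like this, Admin.createTable(mobEnabledTable()) produces a table whose small cells stay in ordinary HFiles, which is why the compactor above finds zero MOB cells and discards its MOB writer.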
2024-12-08T00:22:18,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:18,072 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617337978; duration=0sec 2024-12-08T00:22:18,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:18,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:18,072 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:18,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:18,073 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:18,073 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:18,073 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=35.2 K 2024-12-08T00:22:18,074 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e038cb800be4b638348d8254525ae9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733617334995 2024-12-08T00:22:18,075 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1532fdc91e024c1190826ca5728b2759, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733617335052 2024-12-08T00:22:18,075 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting cc6776a0821c4816afdcfa6520a04205, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176 2024-12-08T00:22:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is 
added to blk_1073742506_1682 (size=31058) 2024-12-08T00:22:18,085 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6eb167abc5724f94b3eee1e92baa904b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b 2024-12-08T00:22:18,089 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into 6eb167abc5724f94b3eee1e92baa904b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:18,089 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:18,089 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617337978; duration=0sec 2024-12-08T00:22:18,090 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:18,090 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:18,098 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#580 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:18,098 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c87ba21481ab48058066a5a1aa551511 is 50, key is test_row_0/C:col10/1733617337481/Put/seqid=0 2024-12-08T00:22:18,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742507_1683 (size=12104) 2024-12-08T00:22:18,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:18,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:22:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:18,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:18,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a072cbdaf20c4939acc0459074e40c70_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617337523/Put/seqid=0 2024-12-08T00:22:18,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742508_1684 (size=12154) 2024-12-08T00:22:18,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617398151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617398152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617398152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617398254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617398255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617398255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617398457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617398457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617398458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,514 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c87ba21481ab48058066a5a1aa551511 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c87ba21481ab48058066a5a1aa551511 2024-12-08T00:22:18,518 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into c87ba21481ab48058066a5a1aa551511(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
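[Annotation, not part of the log] From 00:22:18,153 onward the RPC handlers reject writes with RegionTooBusyException because the region's memstore has passed its blocking limit (512 K here, far below the production default and presumably tuned down by the test to force this path), while the client's RpcRetryingCallerImpl keeps retrying with backoff ("tries=6, retries=16"). A minimal client-side sketch of the same put-with-retries pattern follows; the retry and pause values are illustrative assumptions, and the cell payload is a placeholder rather than the test tool's data.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // RegionTooBusyException is retriable, so the synchronous client backs off and retries;
        // these two knobs bound how long it keeps trying (values here are assumptions).
        conf.setInt("hbase.client.retries.number", 16); // matches the retries=16 visible in the log
        conf.setLong("hbase.client.pause", 100);        // base backoff in milliseconds
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("placeholder"));
          table.put(put); // blocks through the retry loop; throws IOException once retries are exhausted
        }
      }
    }

If the region stays blocked past the configured retries, the caller surfaces the RegionTooBusyException to the application, which is what the AcidGuaranteesTestTool writer threads seen later in the log absorb and retry.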
2024-12-08T00:22:18,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:18,518 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=13, startTime=1733617337978; duration=0sec 2024-12-08T00:22:18,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:18,518 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:18,551 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:18,554 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a072cbdaf20c4939acc0459074e40c70_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a072cbdaf20c4939acc0459074e40c70_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:18,555 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/b2ddab6d65e64e4cb413c203bda539f2, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:18,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/b2ddab6d65e64e4cb413c203bda539f2 is 175, key is test_row_0/A:col10/1733617337523/Put/seqid=0 2024-12-08T00:22:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742509_1685 (size=30955) 2024-12-08T00:22:18,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617398760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617398762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:18,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617398762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:18,959 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/b2ddab6d65e64e4cb413c203bda539f2 2024-12-08T00:22:18,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/275da2b9671e41788c9145378fc97a45 is 50, key is test_row_0/B:col10/1733617337523/Put/seqid=0 2024-12-08T00:22:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742510_1686 (size=12001) 2024-12-08T00:22:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T00:22:19,085 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-08T00:22:19,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-08T00:22:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:19,087 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:19,088 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): 
pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:19,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:19,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:19,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617399181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,183 DEBUG [Thread-2905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:19,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:19,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617399196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,198 DEBUG [Thread-2901 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:19,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T00:22:19,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
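[editor's note] The RpcRetryingCallerImpl trace above is the client side of these RegionTooBusyException rejections: each put is retried with backoff (tries=6, retries=16 in the log) until the region accepts the write or the retry budget / operation deadline runs out. As an illustrative sketch only — the class names are the public HBase client API, but the table, row, column, and tuning values below are assumptions for this example, not taken from the test — a single put with explicit retry settings looks roughly like this:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetries {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Client-side retry budget and backoff base; 16 retries matches the
            // "retries=16" reported by RpcRetryingCallerImpl in the log above.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100);
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_2"));
                // Column family/qualifier follow the test's A/col10 layout (illustrative).
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // RegionTooBusyException is retried internally; if the budget is
                // exhausted the put surfaces an IOException to the caller.
                table.put(put);
            }
        }
    }
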
2024-12-08T00:22:19,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:19,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:19,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:19,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
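[editor's note] The pid=174 failure above is the server-side half of a master-driven flush: FlushRegionCallable refuses to flush while another flush of the same region is in flight, reports the IOException back, and the master re-dispatches the procedure until it succeeds (as it eventually does for pid=174 further down). The same kind of flush can be requested through the public Admin API; a minimal sketch, assuming an already-created TestAcidGuarantees table and default client configuration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws IOException {
            try (Connection connection =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin()) {
                // Requests a flush of every region of the table; in this log the
                // master drives it as a FlushTableProcedure/FlushRegionProcedure and
                // simply retries regions that report "already flushing".
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
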
2024-12-08T00:22:19,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:19,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:19,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617399263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617399266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617399268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/275da2b9671e41788c9145378fc97a45 2024-12-08T00:22:19,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/765a576133f141749322e3ee69531ece is 50, key is test_row_0/C:col10/1733617337523/Put/seqid=0 2024-12-08T00:22:19,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742511_1687 (size=12001) 2024-12-08T00:22:19,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/765a576133f141749322e3ee69531ece 2024-12-08T00:22:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/b2ddab6d65e64e4cb413c203bda539f2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2 2024-12-08T00:22:19,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:19,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2, entries=150, 
sequenceid=81, filesize=30.2 K 2024-12-08T00:22:19,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/275da2b9671e41788c9145378fc97a45 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45 2024-12-08T00:22:19,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T00:22:19,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:19,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:19,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:19,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:19,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:19,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45, entries=150, sequenceid=81, filesize=11.7 K 2024-12-08T00:22:19,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:19,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/765a576133f141749322e3ee69531ece as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece 2024-12-08T00:22:19,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece, entries=150, sequenceid=81, filesize=11.7 K 2024-12-08T00:22:19,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 8f0d77d608530d497fe4f44ffdd89312 in 1257ms, sequenceid=81, compaction requested=false 2024-12-08T00:22:19,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:19,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:19,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T00:22:19,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
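[editor's note] All of the "Over memstore limit=512.0 K" rejections in this log come from the region's blocking check: while a region's memstore sits above its blocking size (the configured flush size times the block multiplier), new mutations are rejected with RegionTooBusyException until a flush like the one just committed brings it back down. The test evidently runs with a very small flush size; the property names below are standard HBase settings, but the values are illustrative assumptions chosen only so that the product matches the 512 K limit seen above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per-region flush threshold in bytes. 128 KB is an illustrative value
            // for a test setup; the production default is 128 MB.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Writes are blocked once the memstore reaches flush.size * multiplier,
            // i.e. 512 KB with these values.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Blocking memstore size: " + blockingLimit + " bytes");
        }
    }
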
2024-12-08T00:22:19,546 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:19,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208372afe1017f041f99602c6a49f0d10f4_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617338151/Put/seqid=0 2024-12-08T00:22:19,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742512_1688 (size=12154) 2024-12-08T00:22:19,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:19,563 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208372afe1017f041f99602c6a49f0d10f4_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208372afe1017f041f99602c6a49f0d10f4_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/cb923cf8ef0d4f718bdf2589cb72bb42, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:19,565 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/cb923cf8ef0d4f718bdf2589cb72bb42 is 175, key is test_row_0/A:col10/1733617338151/Put/seqid=0 2024-12-08T00:22:19,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742513_1689 (size=30955) 2024-12-08T00:22:19,569 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/cb923cf8ef0d4f718bdf2589cb72bb42 2024-12-08T00:22:19,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/b4eeff693b944df9bd9390ad24b74cf0 is 50, key is test_row_0/B:col10/1733617338151/Put/seqid=0 2024-12-08T00:22:19,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742514_1690 (size=12001) 2024-12-08T00:22:19,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:19,979 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/b4eeff693b944df9bd9390ad24b74cf0 2024-12-08T00:22:19,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/bc1afc60aaad4871b19aa3152e8dbea3 is 50, key is test_row_0/C:col10/1733617338151/Put/seqid=0 2024-12-08T00:22:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742515_1691 (size=12001) 2024-12-08T00:22:20,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:20,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:20,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:20,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617400321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617400321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617400322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,389 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/bc1afc60aaad4871b19aa3152e8dbea3 2024-12-08T00:22:20,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/cb923cf8ef0d4f718bdf2589cb72bb42 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42 2024-12-08T00:22:20,396 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42, entries=150, sequenceid=94, filesize=30.2 K 2024-12-08T00:22:20,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/b4eeff693b944df9bd9390ad24b74cf0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0 2024-12-08T00:22:20,400 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T00:22:20,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/bc1afc60aaad4871b19aa3152e8dbea3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3 2024-12-08T00:22:20,403 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T00:22:20,404 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8f0d77d608530d497fe4f44ffdd89312 in 858ms, sequenceid=94, compaction requested=true 2024-12-08T00:22:20,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:20,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:20,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-08T00:22:20,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-08T00:22:20,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-08T00:22:20,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3170 sec 2024-12-08T00:22:20,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.3210 sec 2024-12-08T00:22:20,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:20,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:20,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208df468b0fd9904e79bac24f1cce28de32_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:20,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617400435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617400436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617400437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742516_1692 (size=14594) 2024-12-08T00:22:20,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617400538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617400539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617400540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617400742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617400743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:20,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617400743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:20,840 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:20,844 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208df468b0fd9904e79bac24f1cce28de32_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208df468b0fd9904e79bac24f1cce28de32_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:20,845 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/02ae70a374fb43a8902ec9dcc02427ba, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:20,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/02ae70a374fb43a8902ec9dcc02427ba is 175, key is test_row_0/A:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:20,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742517_1693 (size=39549) 2024-12-08T00:22:20,851 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/02ae70a374fb43a8902ec9dcc02427ba 2024-12-08T00:22:20,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6cb5321d290647de85f7f67f79e2270a is 50, key is test_row_0/B:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:20,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742518_1694 (size=12001) 2024-12-08T00:22:21,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617401045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617401046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617401052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T00:22:21,191 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-08T00:22:21,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:21,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-08T00:22:21,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T00:22:21,194 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:21,194 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:21,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:21,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6cb5321d290647de85f7f67f79e2270a 2024-12-08T00:22:21,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3e9d7db9c9c342179f7bfe52ef736d54 is 50, key is test_row_0/C:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:21,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742519_1695 (size=12001) 
2024-12-08T00:22:21,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3e9d7db9c9c342179f7bfe52ef736d54 2024-12-08T00:22:21,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/02ae70a374fb43a8902ec9dcc02427ba as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba 2024-12-08T00:22:21,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba, entries=200, sequenceid=120, filesize=38.6 K 2024-12-08T00:22:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6cb5321d290647de85f7f67f79e2270a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a 2024-12-08T00:22:21,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a, entries=150, sequenceid=120, filesize=11.7 K 2024-12-08T00:22:21,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3e9d7db9c9c342179f7bfe52ef736d54 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54 2024-12-08T00:22:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54, entries=150, sequenceid=120, filesize=11.7 K 2024-12-08T00:22:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 8f0d77d608530d497fe4f44ffdd89312 in 860ms, sequenceid=120, compaction requested=true 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,286 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:21,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:21,286 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,287 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:21,287 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:21,287 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:21,287 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:21,287 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:21,287 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,287 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=129.4 K 2024-12-08T00:22:21,287 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ff281ce7e7734a03a21ccd1517c49d31, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=47.0 K 2024-12-08T00:22:21,288 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:21,288 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba] 2024-12-08T00:22:21,288 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ff281ce7e7734a03a21ccd1517c49d31, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176 2024-12-08T00:22:21,288 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eb167abc5724f94b3eee1e92baa904b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176 2024-12-08T00:22:21,288 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 275da2b9671e41788c9145378fc97a45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733617337523 2024-12-08T00:22:21,288 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2ddab6d65e64e4cb413c203bda539f2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733617337523 2024-12-08T00:22:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,289 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b4eeff693b944df9bd9390ad24b74cf0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733617338149 2024-12-08T00:22:21,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb923cf8ef0d4f718bdf2589cb72bb42, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, 
earliestPutTs=1733617338149 2024-12-08T00:22:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,289 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02ae70a374fb43a8902ec9dcc02427ba, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340316 2024-12-08T00:22:21,289 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cb5321d290647de85f7f67f79e2270a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340319 2024-12-08T00:22:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T00:22:21,295 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,296 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#590 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:21,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,296 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/db5b8f0153c749039f167b727c99dd77 is 50, key is test_row_0/B:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,298 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,299 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,300 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412083626595e91234683ba9975c56b0d1c96_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:21,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742520_1696 (size=12241) 2024-12-08T00:22:21,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,302 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412083626595e91234683ba9975c56b0d1c96_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:21,302 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083626595e91234683ba9975c56b0d1c96_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742521_1697 (size=4469) 2024-12-08T00:22:21,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,314 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/db5b8f0153c749039f167b727c99dd77 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/db5b8f0153c749039f167b727c99dd77 2024-12-08T00:22:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
2024-12-08T00:22:21,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,319 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into db5b8f0153c749039f167b727c99dd77(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:22:21,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312:
2024-12-08T00:22:21,319 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=12, startTime=1733617341286; duration=0sec
2024-12-08T00:22:21,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-08T00:22:21,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B
2024-12-08T00:22:21,319 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-08T00:22:21,319..00:22:21,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-08T00:22:21,320 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files)
2024-12-08T00:22:21,321 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.
2024-12-08T00:22:21,321 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c87ba21481ab48058066a5a1aa551511, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=47.0 K
2024-12-08T00:22:21,321 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c87ba21481ab48058066a5a1aa551511, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733617337176
2024-12-08T00:22:21,324 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 765a576133f141749322e3ee69531ece, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733617337523
2024-12-08T00:22:21,324 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bc1afc60aaad4871b19aa3152e8dbea3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733617338149
2024-12-08T00:22:21,325 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e9d7db9c9c342179f7bfe52ef736d54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340319
2024-12-08T00:22:21,321..00:22:21,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
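The SortedCompactionPolicy and ExploringCompactionPolicy entries above show the selection step that picked all 4 eligible files (48107 bytes) for the C store. The following is a minimal sketch of the exploring idea, not HBase's actual implementation: enumerate contiguous windows of eligible files, discard windows that violate the size-ratio rule, and prefer the window with the most files and, on ties, the smallest total size. The thresholds are illustrative assumptions:

    // Minimal sketch of an "exploring" selection. Not HBase code; thresholds
    // (minFiles, maxFiles, ratio) are assumptions for illustration only.
    import java.util.List;

    final class ExploringSelectionSketch {
      static List<Long> select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = List.of();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
          for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
            List<Long> window = fileSizes.subList(start, end);
            if (!withinRatio(window, ratio)) continue;
            long total = window.stream().mapToLong(Long::longValue).sum();
            // Prefer more files; break ties with the smaller total size.
            if (window.size() > best.size() || (window.size() == best.size() && total < bestSize)) {
              best = window;
              bestSize = total;
            }
          }
        }
        return best;
      }

      // Each file must be no larger than `ratio` times the sum of the others in the window.
      static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
          if (size > (total - size) * ratio) return false;
        }
        return true;
      }

      public static void main(String[] args) {
        // Four files of roughly 12 K each, comparable to the 48107-byte selection above.
        System.out.println(select(List.of(12100L, 12000L, 12007L, 12000L), 2, 10, 1.2));
      }
    }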
2024-12-08T00:22:21,330..00:22:21,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,333 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#592 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-08T00:22:21,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:22:21,334 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c41f768a6d5746e58f4a98dbb4016985 is 50, key is test_row_0/C:col10/1733617340425/Put/seqid=0
2024-12-08T00:22:21,338..00:22:21,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742522_1698 (size=12241)
2024-12-08T00:22:21,339..00:22:21,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
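The PressureAwareThroughputController entry above reports the compaction's average throughput (3.28 MB/second) against a 50.00 MB/second total limit. A simplified, hypothetical throttle along those lines is sketched below; it only tracks the running average and sleeps when the rate would exceed the budget, and is not the HBase class itself:

    // Simplified, hypothetical throughput throttle: track bytes written since
    // the operation started and sleep just long enough to keep the average
    // rate under the configured limit. Illustration only.
    final class SimpleThroughputThrottle {
      private final double limitBytesPerSec;
      private final long startNanos = System.nanoTime();
      private long bytesWritten;

      SimpleThroughputThrottle(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
      }

      // Call after writing `bytes`.
      void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double neededSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((neededSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
          Thread.sleep(sleepMs);
        }
      }

      double averageThroughputMBps() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        return elapsedSec > 0 ? bytesWritten / elapsedSec / (1024 * 1024) : 0;
      }
    }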
2024-12-08T00:22:21,342..00:22:21,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,344 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c41f768a6d5746e58f4a98dbb4016985 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c41f768a6d5746e58f4a98dbb4016985
2024-12-08T00:22:21,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:22:21,346 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335
2024-12-08T00:22:21,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:22:21,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176
2024-12-08T00:22:21,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.
2024-12-08T00:22:21,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T00:22:21,347 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C
2024-12-08T00:22:21,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T00:22:21,348..00:22:21,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,350 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into c41f768a6d5746e58f4a98dbb4016985(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T00:22:21,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312:
2024-12-08T00:22:21,350 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=12, startTime=1733617341286; duration=0sec
2024-12-08T00:22:21,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T00:22:21,350 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C
2024-12-08T00:22:21,350..00:22:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cc31f4fd0fa745f597b443b811ba062a_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617340434/Put/seqid=0
2024-12-08T00:22:21,353..00:22:21,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
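The pid=176 entries show the master dispatching a FlushRegionCallable that flushes all three column families of the region. From a client, an equivalent flush of the test table can be requested through the Admin API; the sketch below assumes a reachable cluster configured via hbase-site.xml on the classpath:

    // Sketch: request a flush of the test table from a client. This drives the
    // same server-side flush path recorded by the entries above. Assumes the
    // cluster is reachable with the default configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes all stores (A, B and C) of every region of the table.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }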
2024-12-08T00:22:21,355..00:22:21,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742523_1699 (size=9764)
2024-12-08T00:22:21,357..00:22:21,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,361..00:22:21,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,387..00:22:21,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-08T00:22:21,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T00:22:21,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T00:22:21,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617401577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617401577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617401577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617401681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617401681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617401681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,724 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#591 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:21,725 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/492a037176ba4f7cb5971051353529c2 is 175, key is test_row_0/A:col10/1733617340425/Put/seqid=0 2024-12-08T00:22:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742524_1700 (size=31195) 2024-12-08T00:22:21,735 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/492a037176ba4f7cb5971051353529c2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2 2024-12-08T00:22:21,739 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into 492a037176ba4f7cb5971051353529c2(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
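The WARN entries above come from HRegion.checkResources() rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit and a flush is still in flight. Below is a minimal client-side sketch of backing off and retrying such a rejected put, assuming the exception surfaces directly to the caller (i.e. with little or no client-internal retrying configured); the table, family, qualifier, and row mirror the test data seen in this log, and the backoff values are purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100L;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                    // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) { // the exception logged by HRegion.checkResources above
          Thread.sleep(backoffMs);           // give the in-flight flush time to drain the memstore
          backoffMs *= 2;                    // exponential backoff between attempts
        }
      }
    }
  }
}
```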
2024-12-08T00:22:21,739 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:21,739 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=12, startTime=1733617341286; duration=0sec 2024-12-08T00:22:21,739 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:21,739 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:21,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:21,760 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cc31f4fd0fa745f597b443b811ba062a_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cc31f4fd0fa745f597b443b811ba062a_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/fb6a8d43b1494f5b8ebc223751787019, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:21,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/fb6a8d43b1494f5b8ebc223751787019 is 175, key is test_row_0/A:col10/1733617340434/Put/seqid=0 2024-12-08T00:22:21,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742525_1701 (size=22411) 2024-12-08T00:22:21,766 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/fb6a8d43b1494f5b8ebc223751787019 2024-12-08T00:22:21,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/ae9f34b7df4345b7b814435331e9d580 is 50, key is test_row_0/B:col10/1733617340434/Put/seqid=0 2024-12-08T00:22:21,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742526_1702 (size=9707) 2024-12-08T00:22:21,777 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/ae9f34b7df4345b7b814435331e9d580 2024-12-08T00:22:21,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9c435a3cf6924972bd29422e36b3e1dc is 50, key is test_row_0/C:col10/1733617340434/Put/seqid=0 2024-12-08T00:22:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742527_1703 (size=9707) 2024-12-08T00:22:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T00:22:21,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617401883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617401884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:21,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:21,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617401884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617402186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,189 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9c435a3cf6924972bd29422e36b3e1dc 2024-12-08T00:22:22,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617402187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617402188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/fb6a8d43b1494f5b8ebc223751787019 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019 2024-12-08T00:22:22,195 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019, entries=100, sequenceid=132, filesize=21.9 K 2024-12-08T00:22:22,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/ae9f34b7df4345b7b814435331e9d580 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580 2024-12-08T00:22:22,199 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580, 
entries=100, sequenceid=132, filesize=9.5 K 2024-12-08T00:22:22,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9c435a3cf6924972bd29422e36b3e1dc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc 2024-12-08T00:22:22,203 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc, entries=100, sequenceid=132, filesize=9.5 K 2024-12-08T00:22:22,203 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 8f0d77d608530d497fe4f44ffdd89312 in 857ms, sequenceid=132, compaction requested=false 2024-12-08T00:22:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-08T00:22:22,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-08T00:22:22,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-08T00:22:22,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0100 sec 2024-12-08T00:22:22,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.0140 sec 2024-12-08T00:22:22,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T00:22:22,297 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-08T00:22:22,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-08T00:22:22,299 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:22,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T00:22:22,300 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:22,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:22,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T00:22:22,451 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T00:22:22,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
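The entries just above show the client-driven flush path: the master stores a FlushTableProcedure (pid=177), spawns a FlushRegionProcedure subprocedure (pid=178) on the region server, and HBaseAdmin reports the earlier operation (procId 175) completed once its procedures finish. A minimal sketch of issuing such a flush through the public Admin API, assuming a standard client Configuration, looks like this:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with one
      // FlushRegionProcedure subprocedure per region, as in the pid=177/178 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```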
2024-12-08T00:22:22,452 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:22,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:22,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083710b38ccf8a492c9511a2e90023977d_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617341571/Put/seqid=0 2024-12-08T00:22:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742528_1704 (size=12304) 2024-12-08T00:22:22,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:22,467 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083710b38ccf8a492c9511a2e90023977d_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083710b38ccf8a492c9511a2e90023977d_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:22,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/edee421daf5f4cb9a6e875e1c2c46d66, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:22,469 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/edee421daf5f4cb9a6e875e1c2c46d66 is 175, key is test_row_0/A:col10/1733617341571/Put/seqid=0 2024-12-08T00:22:22,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742529_1705 (size=31105) 2024-12-08T00:22:22,475 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/edee421daf5f4cb9a6e875e1c2c46d66 2024-12-08T00:22:22,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a659ff4523a74bc8bead45885d0bf0ae is 50, key is test_row_0/B:col10/1733617341571/Put/seqid=0 2024-12-08T00:22:22,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742530_1706 (size=12151) 2024-12-08T00:22:22,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T00:22:22,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:22,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:22,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617402699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617402700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617402700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617402804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617402804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:22,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617402805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:22,886 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a659ff4523a74bc8bead45885d0bf0ae 2024-12-08T00:22:22,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c63711b4e40e482b85f7d3286ff28422 is 50, key is test_row_0/C:col10/1733617341571/Put/seqid=0 2024-12-08T00:22:22,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742531_1707 (size=12151) 2024-12-08T00:22:22,897 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c63711b4e40e482b85f7d3286ff28422 2024-12-08T00:22:22,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/edee421daf5f4cb9a6e875e1c2c46d66 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66 2024-12-08T00:22:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T00:22:22,903 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66, entries=150, sequenceid=161, filesize=30.4 K 2024-12-08T00:22:22,904 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a659ff4523a74bc8bead45885d0bf0ae as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae 2024-12-08T00:22:22,907 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae, entries=150, sequenceid=161, filesize=11.9 K 2024-12-08T00:22:22,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c63711b4e40e482b85f7d3286ff28422 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422 2024-12-08T00:22:22,911 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422, entries=150, sequenceid=161, filesize=11.9 K 2024-12-08T00:22:22,911 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 8f0d77d608530d497fe4f44ffdd89312 in 460ms, sequenceid=161, compaction requested=true 2024-12-08T00:22:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:22,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
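The 512.0 K figure in the RegionTooBusyException messages is the region's memstore blocking threshold, i.e. the configured per-region flush size multiplied by the block multiplier. The exact settings used by this run are not visible in this part of the log, so the values in the sketch below (128 KB x 4) are only one hypothetical combination that reproduces that limit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: a 128 KB per-region flush size with a 4x block multiplier
    // yields the 512 K blocking limit that checkResources() reports above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```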
2024-12-08T00:22:22,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-08T00:22:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-08T00:22:22,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-08T00:22:22,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 612 msec 2024-12-08T00:22:22,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 616 msec 2024-12-08T00:22:23,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:23,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:23,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:23,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b34c3ee2055d4aaaa73c4c264e2636cd_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:23,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742532_1708 (size=17284) 2024-12-08T00:22:23,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617403033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617403034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617403036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617403136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617403137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617403139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617403191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,193 DEBUG [Thread-2905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:23,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617403207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,209 DEBUG [Thread-2901 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:23,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617403339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617403340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617403342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T00:22:23,403 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-08T00:22:23,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:23,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-12-08T00:22:23,406 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:23,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:23,406 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:23,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:23,420 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:23,423 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b34c3ee2055d4aaaa73c4c264e2636cd_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b34c3ee2055d4aaaa73c4c264e2636cd_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:23,424 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/5f8157212d3246d8b5ce58c89600539e, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:23,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/5f8157212d3246d8b5ce58c89600539e is 175, key is test_row_0/A:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:23,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742533_1709 (size=48389) 2024-12-08T00:22:23,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:23,557 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:23,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:23,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
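[Editor's note] The FlushTableProcedure/FlushRegionProcedure pairs in this stretch (pid=177/178 above, pid=179/180 here) are driven by client-requested flushes of TestAcidGuarantees, visible in the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and HBaseAdmin$TableFuture completion lines. A minimal client-side sketch of issuing such a flush, with connection settings assumed rather than taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a table flush on the master, which in this version fans out
      // per-region flush subprocedures to the region servers, as seen in the
      // PEWorker and RS_FLUSH_OPERATIONS lines of this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}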
2024-12-08T00:22:23,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:23,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617403643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617403644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:23,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617403646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:23,710 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:23,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:23,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
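[Editor's note] The Thread-2905 and Thread-2901 entries earlier in this section show the AcidGuaranteesTestTool writer threads absorbing these RegionTooBusyException responses through RpcRetryingCallerImpl ("tries=7, retries=16") rather than failing immediately. A rough sketch of a client put with explicit retry tuning, using only standard client APIs; the retry values and cell value below are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Number of client retries and the base pause between attempts;
    // the retrying caller backs off when the server answers RegionTooBusyException.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks until the mutation succeeds or the retry budget is exhausted,
      // only then surfacing the underlying exception to the caller.
      table.put(put);
    }
  }
}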
2024-12-08T00:22:23,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:23,828 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/5f8157212d3246d8b5ce58c89600539e 2024-12-08T00:22:23,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6995796b8b55439e95e07f369ee5de1f is 50, key is test_row_0/B:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:23,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742534_1710 (size=12151) 2024-12-08T00:22:23,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:23,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:23,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:23,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:23,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:23,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:24,016 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:24,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617404145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617404147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617404151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,168 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:24,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:24,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:24,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6995796b8b55439e95e07f369ee5de1f 2024-12-08T00:22:24,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/227abefac4f349f989d09b86d9d272ee is 50, key is test_row_0/C:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:24,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742535_1711 (size=12151) 2024-12-08T00:22:24,320 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:24,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:24,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:24,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:24,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:24,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:24,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/227abefac4f349f989d09b86d9d272ee 2024-12-08T00:22:24,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/5f8157212d3246d8b5ce58c89600539e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e 2024-12-08T00:22:24,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e, entries=250, sequenceid=173, filesize=47.3 K 2024-12-08T00:22:24,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6995796b8b55439e95e07f369ee5de1f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f 2024-12-08T00:22:24,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f, entries=150, sequenceid=173, filesize=11.9 K 2024-12-08T00:22:24,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/227abefac4f349f989d09b86d9d272ee as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee 2024-12-08T00:22:24,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee, entries=150, sequenceid=173, filesize=11.9 K 2024-12-08T00:22:24,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8f0d77d608530d497fe4f44ffdd89312 in 1659ms, sequenceid=173, compaction requested=true 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:24,668 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:24,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:24,668 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:24,669 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133100 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:24,669 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:24,669 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46250 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:24,669 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:24,669 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,669 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:24,669 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=130.0 K 2024-12-08T00:22:24,669 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,669 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/db5b8f0153c749039f167b727c99dd77, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=45.2 K 2024-12-08T00:22:24,669 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e] 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting db5b8f0153c749039f167b727c99dd77, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340319 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 492a037176ba4f7cb5971051353529c2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340319 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb6a8d43b1494f5b8ebc223751787019, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733617340434 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting ae9f34b7df4345b7b814435331e9d580, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733617340434 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting edee421daf5f4cb9a6e875e1c2c46d66, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733617341571 2024-12-08T00:22:24,670 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a659ff4523a74bc8bead45885d0bf0ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733617341571 2024-12-08T00:22:24,671 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f8157212d3246d8b5ce58c89600539e, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342691 2024-12-08T00:22:24,671 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6995796b8b55439e95e07f369ee5de1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342694 2024-12-08T00:22:24,677 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:24,679 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208352cf85e23884058bad5498f92dd043c_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:24,680 INFO 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#603 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:24,680 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/201ae3c5dd5c4c279046f18a0c1b1013 is 50, key is test_row_0/B:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:24,681 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208352cf85e23884058bad5498f92dd043c_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:24,682 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208352cf85e23884058bad5498f92dd043c_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:24,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742536_1712 (size=12527) 2024-12-08T00:22:24,687 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/201ae3c5dd5c4c279046f18a0c1b1013 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/201ae3c5dd5c4c279046f18a0c1b1013 2024-12-08T00:22:24,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742537_1713 (size=4469) 2024-12-08T00:22:24,692 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 201ae3c5dd5c4c279046f18a0c1b1013(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:24,692 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:24,692 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=12, startTime=1733617344668; duration=0sec 2024-12-08T00:22:24,692 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:24,692 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:24,692 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:24,693 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46250 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:24,693 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:24,693 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:24,693 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c41f768a6d5746e58f4a98dbb4016985, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=45.2 K 2024-12-08T00:22:24,693 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c41f768a6d5746e58f4a98dbb4016985, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733617340319 2024-12-08T00:22:24,694 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c435a3cf6924972bd29422e36b3e1dc, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733617340434 2024-12-08T00:22:24,694 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c63711b4e40e482b85f7d3286ff28422, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=161, earliestPutTs=1733617341571 2024-12-08T00:22:24,694 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 227abefac4f349f989d09b86d9d272ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342694 2024-12-08T00:22:24,701 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#604 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:24,701 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/1be7bb01eabe4bd58ee7d4a68e118ec2 is 50, key is test_row_0/C:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742538_1714 (size=12527) 2024-12-08T00:22:24,778 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:24,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T00:22:24,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:24,778 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:24,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:24,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084464477a9cbb44fb93e98705ed2abc23_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617343034/Put/seqid=0 2024-12-08T00:22:24,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742539_1715 (size=12304) 2024-12-08T00:22:25,090 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#602 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:25,091 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/7341f09dd64f40ea8a18beef4634e21a is 175, key is test_row_0/A:col10/1733617343007/Put/seqid=0 2024-12-08T00:22:25,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742540_1716 (size=31481) 2024-12-08T00:22:25,108 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/1be7bb01eabe4bd58ee7d4a68e118ec2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1be7bb01eabe4bd58ee7d4a68e118ec2 2024-12-08T00:22:25,112 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into 1be7bb01eabe4bd58ee7d4a68e118ec2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:25,112 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:25,112 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=12, startTime=1733617344668; duration=0sec 2024-12-08T00:22:25,112 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:25,112 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:25,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617405162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617405163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617405163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:25,193 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084464477a9cbb44fb93e98705ed2abc23_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084464477a9cbb44fb93e98705ed2abc23_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:25,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/1771ce5741e8473b858cb2cd2c8fcc3f, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:25,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/1771ce5741e8473b858cb2cd2c8fcc3f is 175, key is test_row_0/A:col10/1733617343034/Put/seqid=0 2024-12-08T00:22:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742541_1717 (size=31105) 2024-12-08T00:22:25,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617405265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617405266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617405266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617405469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617405469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617405469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,499 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/7341f09dd64f40ea8a18beef4634e21a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a 2024-12-08T00:22:25,502 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into 7341f09dd64f40ea8a18beef4634e21a(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:25,502 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:25,502 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=12, startTime=1733617344668; duration=0sec 2024-12-08T00:22:25,502 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:25,503 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:25,601 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/1771ce5741e8473b858cb2cd2c8fcc3f 2024-12-08T00:22:25,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/5fc2846ab46f4905998be69e2c7eacce is 50, key is test_row_0/B:col10/1733617343034/Put/seqid=0 2024-12-08T00:22:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742542_1718 (size=12151) 2024-12-08T00:22:25,610 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/5fc2846ab46f4905998be69e2c7eacce 2024-12-08T00:22:25,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/8901f5eb781a420792342a0f05f30bed is 50, key is test_row_0/C:col10/1733617343034/Put/seqid=0 2024-12-08T00:22:25,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742543_1719 (size=12151) 2024-12-08T00:22:25,622 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/8901f5eb781a420792342a0f05f30bed 
2024-12-08T00:22:25,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/1771ce5741e8473b858cb2cd2c8fcc3f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f 2024-12-08T00:22:25,628 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f, entries=150, sequenceid=198, filesize=30.4 K 2024-12-08T00:22:25,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/5fc2846ab46f4905998be69e2c7eacce as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce 2024-12-08T00:22:25,632 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce, entries=150, sequenceid=198, filesize=11.9 K 2024-12-08T00:22:25,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/8901f5eb781a420792342a0f05f30bed as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed 2024-12-08T00:22:25,635 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed, entries=150, sequenceid=198, filesize=11.9 K 2024-12-08T00:22:25,636 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8f0d77d608530d497fe4f44ffdd89312 in 858ms, sequenceid=198, compaction requested=false 2024-12-08T00:22:25,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:25,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region 
operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:25,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-08T00:22:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-08T00:22:25,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-08T00:22:25,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2310 sec 2024-12-08T00:22:25,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.2340 sec 2024-12-08T00:22:25,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:25,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:25,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:25,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120884a7a7fc24ca41ebac29b80fa72592b0_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:25,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742544_1720 (size=14794) 2024-12-08T00:22:25,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617405798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617405801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617405801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617405902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617405904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:25,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617405905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617406106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617406107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617406108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,190 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:26,193 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120884a7a7fc24ca41ebac29b80fa72592b0_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120884a7a7fc24ca41ebac29b80fa72592b0_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:26,194 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ae4162dfed424dcf93250c43d47f7d79, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ae4162dfed424dcf93250c43d47f7d79 is 175, key is test_row_0/A:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742545_1721 (size=39749) 2024-12-08T00:22:26,198 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ae4162dfed424dcf93250c43d47f7d79 2024-12-08T00:22:26,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/dbddeb9674ad4f5fa12e24982174b2fc is 50, key is test_row_0/B:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742546_1722 
(size=12151) 2024-12-08T00:22:26,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/dbddeb9674ad4f5fa12e24982174b2fc 2024-12-08T00:22:26,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c3228171656e46c5b654457ec2729410 is 50, key is test_row_0/C:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742547_1723 (size=12151) 2024-12-08T00:22:26,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c3228171656e46c5b654457ec2729410 2024-12-08T00:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ae4162dfed424dcf93250c43d47f7d79 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79 2024-12-08T00:22:26,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79, entries=200, sequenceid=213, filesize=38.8 K 2024-12-08T00:22:26,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/dbddeb9674ad4f5fa12e24982174b2fc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc 2024-12-08T00:22:26,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc, entries=150, sequenceid=213, filesize=11.9 K 2024-12-08T00:22:26,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/c3228171656e46c5b654457ec2729410 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410 2024-12-08T00:22:26,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410, entries=150, sequenceid=213, filesize=11.9 K 2024-12-08T00:22:26,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8f0d77d608530d497fe4f44ffdd89312 in 458ms, sequenceid=213, compaction requested=true 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:26,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:26,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:26,232 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:26,232 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:26,232 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:26,233 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=99.9 K 2024-12-08T00:22:26,233 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:26,233 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/201ae3c5dd5c4c279046f18a0c1b1013, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.0 K 2024-12-08T00:22:26,233 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79] 2024-12-08T00:22:26,233 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 201ae3c5dd5c4c279046f18a0c1b1013, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342694 2024-12-08T00:22:26,233 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7341f09dd64f40ea8a18beef4634e21a, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342694 2024-12-08T00:22:26,233 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1771ce5741e8473b858cb2cd2c8fcc3f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733617343027 2024-12-08T00:22:26,233 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fc2846ab46f4905998be69e2c7eacce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733617343027 2024-12-08T00:22:26,234 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae4162dfed424dcf93250c43d47f7d79, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:26,234 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dbddeb9674ad4f5fa12e24982174b2fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:26,239 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,240 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#611 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:26,241 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/f37f020a22de48c39cf5c43c4af8bbc4 is 50, key is test_row_0/B:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,241 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208904b968547bb4d5d97a365cabdb327a8_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,243 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208904b968547bb4d5d97a365cabdb327a8_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,244 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208904b968547bb4d5d97a365cabdb327a8_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742548_1724 (size=12629) 2024-12-08T00:22:26,248 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/f37f020a22de48c39cf5c43c4af8bbc4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f37f020a22de48c39cf5c43c4af8bbc4 2024-12-08T00:22:26,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742549_1725 (size=4469) 2024-12-08T00:22:26,250 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#612 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:26,251 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/3b2616f3dd53468784f301426382871e is 175, key is test_row_0/A:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,253 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into f37f020a22de48c39cf5c43c4af8bbc4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:26,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:26,253 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617346231; duration=0sec 2024-12-08T00:22:26,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:26,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:26,253 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:26,254 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:26,254 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:26,254 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:26,254 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1be7bb01eabe4bd58ee7d4a68e118ec2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.0 K 2024-12-08T00:22:26,255 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1be7bb01eabe4bd58ee7d4a68e118ec2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733617342694 2024-12-08T00:22:26,255 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8901f5eb781a420792342a0f05f30bed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733617343027 2024-12-08T00:22:26,256 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c3228171656e46c5b654457ec2729410, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 
is added to blk_1073742550_1726 (size=31583) 2024-12-08T00:22:26,261 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/3b2616f3dd53468784f301426382871e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e 2024-12-08T00:22:26,263 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#613 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:26,263 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/93f6e066b27844a997844ff51e5daf4c is 50, key is test_row_0/C:col10/1733617345162/Put/seqid=0 2024-12-08T00:22:26,266 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into 3b2616f3dd53468784f301426382871e(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:26,266 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:26,266 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617346231; duration=0sec 2024-12-08T00:22:26,266 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:26,266 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:26,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742551_1727 (size=12629) 2024-12-08T00:22:26,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:26,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:22:26,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:26,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:26,414 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:26,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208431bba8f40684b368dfbf9346deec52f_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617345796/Put/seqid=0 2024-12-08T00:22:26,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742552_1728 (size=12304) 2024-12-08T00:22:26,423 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:26,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617406424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,427 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208431bba8f40684b368dfbf9346deec52f_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208431bba8f40684b368dfbf9346deec52f_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617406425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617406425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,428 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4803b19a6b8d4d8c800574d300e7205f, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4803b19a6b8d4d8c800574d300e7205f is 175, key is test_row_0/A:col10/1733617345796/Put/seqid=0 2024-12-08T00:22:26,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742553_1729 (size=31105) 2024-12-08T00:22:26,435 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4803b19a6b8d4d8c800574d300e7205f 2024-12-08T00:22:26,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/bf08613baed44a719083014303e545b6 is 50, key is test_row_0/B:col10/1733617345796/Put/seqid=0 2024-12-08T00:22:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742554_1730 (size=12151) 2024-12-08T00:22:26,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/bf08613baed44a719083014303e545b6 2024-12-08T00:22:26,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/42e00882621447b394c8c350a878af9e is 50, key is test_row_0/C:col10/1733617345796/Put/seqid=0 2024-12-08T00:22:26,474 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742555_1731 (size=12151) 2024-12-08T00:22:26,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/42e00882621447b394c8c350a878af9e 2024-12-08T00:22:26,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4803b19a6b8d4d8c800574d300e7205f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f 2024-12-08T00:22:26,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f, entries=150, sequenceid=240, filesize=30.4 K 2024-12-08T00:22:26,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/bf08613baed44a719083014303e545b6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6 2024-12-08T00:22:26,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6, entries=150, sequenceid=240, filesize=11.9 K 2024-12-08T00:22:26,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/42e00882621447b394c8c350a878af9e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e 2024-12-08T00:22:26,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e, entries=150, sequenceid=240, filesize=11.9 K 2024-12-08T00:22:26,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 8f0d77d608530d497fe4f44ffdd89312 in 76ms, sequenceid=240, compaction requested=false 2024-12-08T00:22:26,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:26,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:26,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): 
Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:26,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208370f4c338ae2437aa42a8d087ffdc403_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:26,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742556_1732 (size=14794) 2024-12-08T00:22:26,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617406558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617406559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617406560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617406661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617406663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617406663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,672 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/93f6e066b27844a997844ff51e5daf4c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/93f6e066b27844a997844ff51e5daf4c 2024-12-08T00:22:26,676 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into 93f6e066b27844a997844ff51e5daf4c(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:26,676 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:26,676 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=13, startTime=1733617346231; duration=0sec 2024-12-08T00:22:26,676 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:26,676 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:26,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617406865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617406867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:26,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617406867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:26,939 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:26,943 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208370f4c338ae2437aa42a8d087ffdc403_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208370f4c338ae2437aa42a8d087ffdc403_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:26,944 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e4956cfedce84290bb376e027c27dbb7, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:26,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e4956cfedce84290bb376e027c27dbb7 is 175, key is test_row_0/A:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:26,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742557_1733 (size=39749) 2024-12-08T00:22:27,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617407167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617407170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617407171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,361 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e4956cfedce84290bb376e027c27dbb7 2024-12-08T00:22:27,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/8e9a12ed547e4e2399b2c32ba8aea478 is 50, key is test_row_0/B:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742558_1734 (size=12151) 2024-12-08T00:22:27,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/8e9a12ed547e4e2399b2c32ba8aea478 2024-12-08T00:22:27,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/fbd5c0053c43439084121ec5e367c58b is 50, key is test_row_0/C:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:27,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742559_1735 (size=12151) 2024-12-08T00:22:27,395 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/fbd5c0053c43439084121ec5e367c58b 2024-12-08T00:22:27,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e4956cfedce84290bb376e027c27dbb7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7 2024-12-08T00:22:27,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7, entries=200, sequenceid=253, filesize=38.8 K 2024-12-08T00:22:27,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/8e9a12ed547e4e2399b2c32ba8aea478 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478 2024-12-08T00:22:27,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478, entries=150, sequenceid=253, filesize=11.9 K 2024-12-08T00:22:27,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/fbd5c0053c43439084121ec5e367c58b as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b 2024-12-08T00:22:27,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b, entries=150, sequenceid=253, filesize=11.9 K 2024-12-08T00:22:27,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 8f0d77d608530d497fe4f44ffdd89312 in 883ms, sequenceid=253, compaction requested=true 2024-12-08T00:22:27,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:27,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:27,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-08T00:22:27,411 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:27,412 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:27,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:27,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:27,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:27,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:27,412 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:27,412 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:27,412 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,413 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=100.0 K 2024-12-08T00:22:27,413 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,413 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7] 2024-12-08T00:22:27,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:27,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:27,414 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,414 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f37f020a22de48c39cf5c43c4af8bbc4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.1 K 2024-12-08T00:22:27,414 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting f37f020a22de48c39cf5c43c4af8bbc4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:27,415 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b2616f3dd53468784f301426382871e, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:27,415 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting bf08613baed44a719083014303e545b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733617345796 2024-12-08T00:22:27,415 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4803b19a6b8d4d8c800574d300e7205f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733617345796 2024-12-08T00:22:27,415 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e9a12ed547e4e2399b2c32ba8aea478, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733617346415 2024-12-08T00:22:27,415 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
e4956cfedce84290bb376e027c27dbb7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733617346415 2024-12-08T00:22:27,422 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#620 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:27,422 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/95be750086e042698d6129488c2a2dac is 50, key is test_row_0/B:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:27,423 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,432 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412081502d5811dc5491c91d5d6cd016567c1_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,434 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412081502d5811dc5491c91d5d6cd016567c1_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,434 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081502d5811dc5491c91d5d6cd016567c1_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742560_1736 (size=12731) 2024-12-08T00:22:27,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742561_1737 (size=4469) 2024-12-08T00:22:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T00:22:27,510 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-08T00:22:27,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-12-08T00:22:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=181 2024-12-08T00:22:27,513 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:27,513 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:27,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:27,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T00:22:27,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T00:22:27,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,665 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:27,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b5e8fd16d0534ba590be449de36bd19e_8f0d77d608530d497fe4f44ffdd89312 is 50, key is 
test_row_0/A:col10/1733617346554/Put/seqid=0 2024-12-08T00:22:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:27,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:27,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742562_1738 (size=12454) 2024-12-08T00:22:27,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:27,681 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b5e8fd16d0534ba590be449de36bd19e_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5e8fd16d0534ba590be449de36bd19e_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:27,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/2cc49d398ea64872adf64bd04ebe200d, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/2cc49d398ea64872adf64bd04ebe200d is 175, key is test_row_0/A:col10/1733617346554/Put/seqid=0 2024-12-08T00:22:27,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742563_1739 (size=31255) 2024-12-08T00:22:27,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617407683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,686 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/2cc49d398ea64872adf64bd04ebe200d 2024-12-08T00:22:27,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617407684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617407686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6968b09de5144176abf3f8e5bc1974bc is 50, key is test_row_0/B:col10/1733617346554/Put/seqid=0 2024-12-08T00:22:27,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742564_1740 (size=12301) 2024-12-08T00:22:27,696 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6968b09de5144176abf3f8e5bc1974bc 2024-12-08T00:22:27,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/dc48c177d5174a26bb7663979c9c018c is 50, key is test_row_0/C:col10/1733617346554/Put/seqid=0 2024-12-08T00:22:27,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742565_1741 (size=12301) 2024-12-08T00:22:27,705 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 
(bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/dc48c177d5174a26bb7663979c9c018c 2024-12-08T00:22:27,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/2cc49d398ea64872adf64bd04ebe200d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d 2024-12-08T00:22:27,711 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d, entries=150, sequenceid=278, filesize=30.5 K 2024-12-08T00:22:27,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/6968b09de5144176abf3f8e5bc1974bc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc 2024-12-08T00:22:27,715 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc, entries=150, sequenceid=278, filesize=12.0 K 2024-12-08T00:22:27,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/dc48c177d5174a26bb7663979c9c018c as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c 2024-12-08T00:22:27,719 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c, entries=150, sequenceid=278, filesize=12.0 K 2024-12-08T00:22:27,719 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8f0d77d608530d497fe4f44ffdd89312 in 54ms, sequenceid=278, compaction requested=true 2024-12-08T00:22:27,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for 
8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:27,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-08T00:22:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-12-08T00:22:27,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-08T00:22:27,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-08T00:22:27,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 212 msec 2024-12-08T00:22:27,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:27,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:22:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:27,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412088eb684d18d024ebfac5387b512915c3b_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:27,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742566_1742 (size=12454) 2024-12-08T00:22:27,802 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:27,805 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412088eb684d18d024ebfac5387b512915c3b_8f0d77d608530d497fe4f44ffdd89312 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088eb684d18d024ebfac5387b512915c3b_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:27,805 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e61f4e8185c7489ca0d584530cf5dfac, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:27,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e61f4e8185c7489ca0d584530cf5dfac is 175, key is test_row_0/A:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:27,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742567_1743 (size=31255) 2024-12-08T00:22:27,810 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e61f4e8185c7489ca0d584530cf5dfac 2024-12-08T00:22:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T00:22:27,814 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-08T00:22:27,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:27,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-12-08T00:22:27,817 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:27,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:27,817 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:27,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:27,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a813e349af604e65b12cf9e052645cb1 is 50, key is 
test_row_0/B:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:27,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617407817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742568_1744 (size=12301) 2024-12-08T00:22:27,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617407820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617407821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,847 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/95be750086e042698d6129488c2a2dac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/95be750086e042698d6129488c2a2dac 2024-12-08T00:22:27,854 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#621 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:27,855 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e0dd830975bc4fb8aa7b0e93b7b292bb is 175, key is test_row_0/A:col10/1733617346415/Put/seqid=0 2024-12-08T00:22:27,855 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 95be750086e042698d6129488c2a2dac(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:27,855 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:27,855 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617347411; duration=0sec 2024-12-08T00:22:27,855 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:27,855 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:27,855 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:27,856 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:27,857 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:27,857 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,857 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/93f6e066b27844a997844ff51e5daf4c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=48.1 K 2024-12-08T00:22:27,857 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 93f6e066b27844a997844ff51e5daf4c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733617345157 2024-12-08T00:22:27,857 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 42e00882621447b394c8c350a878af9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733617345796 2024-12-08T00:22:27,858 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting fbd5c0053c43439084121ec5e367c58b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=253, earliestPutTs=1733617346415 2024-12-08T00:22:27,858 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dc48c177d5174a26bb7663979c9c018c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617346548 2024-12-08T00:22:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742569_1745 (size=31685) 2024-12-08T00:22:27,863 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e0dd830975bc4fb8aa7b0e93b7b292bb as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb 2024-12-08T00:22:27,868 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into e0dd830975bc4fb8aa7b0e93b7b292bb(size=30.9 K), total size for store is 61.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:27,868 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#627 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:27,868 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:27,868 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617347411; duration=0sec 2024-12-08T00:22:27,868 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:27,868 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:27,868 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/d01712fae9904c39b51e2c8f6710b721 is 50, key is test_row_0/C:col10/1733617346554/Put/seqid=0 2024-12-08T00:22:27,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742570_1746 (size=12915) 2024-12-08T00:22:27,876 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/d01712fae9904c39b51e2c8f6710b721 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/d01712fae9904c39b51e2c8f6710b721 2024-12-08T00:22:27,880 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into d01712fae9904c39b51e2c8f6710b721(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:27,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:27,880 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=12, startTime=1733617347412; duration=0sec 2024-12-08T00:22:27,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:27,880 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617407922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617407924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617407924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:27,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:27,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:27,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:27,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:28,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617408124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617408127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617408127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a813e349af604e65b12cf9e052645cb1 2024-12-08T00:22:28,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/e8300cf7c59847f4a7dc111878682114 is 50, key is test_row_0/C:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:28,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742571_1747 (size=12301) 2024-12-08T00:22:28,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:28,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:28,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:28,427 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:28,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:28,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:28,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617408427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617408431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617408432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:28,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:28,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:28,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:28,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/e8300cf7c59847f4a7dc111878682114 2024-12-08T00:22:28,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/e61f4e8185c7489ca0d584530cf5dfac as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac 2024-12-08T00:22:28,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac, entries=150, sequenceid=291, filesize=30.5 K 2024-12-08T00:22:28,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/a813e349af604e65b12cf9e052645cb1 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1 2024-12-08T00:22:28,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1, entries=150, sequenceid=291, filesize=12.0 K 2024-12-08T00:22:28,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/e8300cf7c59847f4a7dc111878682114 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114 2024-12-08T00:22:28,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114, entries=150, sequenceid=291, filesize=12.0 K 2024-12-08T00:22:28,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8f0d77d608530d497fe4f44ffdd89312 in 859ms, sequenceid=291, compaction requested=true 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:28,648 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:28,648 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:28,649 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,649 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
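The compaction selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") reflect two store-level thresholds: the minimum number of eligible files before a minor compaction is selected, and the store-file count at which further flushes are blocked. A hedged sketch of how those thresholds are typically set on a test Configuration follows; the keys are standard HBase settings, the class name is hypothetical, and the values simply match the counts printed in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of eligible store files before a minor compaction is selected
    // ("3 eligible" in the SortedCompactionPolicy/ExploringCompactionPolicy lines above).
    conf.setInt("hbase.hstore.compaction.min", 3);

    // Store-file count at which the region server starts blocking further flushes
    // ("16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("compaction.min     = " + conf.get("hbase.hstore.compaction.min"));
    System.out.println("blockingStoreFiles = " + conf.get("hbase.hstore.blockingStoreFiles"));
  }
}
```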
2024-12-08T00:22:28,649 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=92.0 K 2024-12-08T00:22:28,649 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/95be750086e042698d6129488c2a2dac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.5 K 2024-12-08T00:22:28,649 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac] 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0dd830975bc4fb8aa7b0e93b7b292bb, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733617346415 2024-12-08T00:22:28,649 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 95be750086e042698d6129488c2a2dac, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733617346415 2024-12-08T00:22:28,650 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 6968b09de5144176abf3f8e5bc1974bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617346548 2024-12-08T00:22:28,650 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cc49d398ea64872adf64bd04ebe200d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617346548 2024-12-08T00:22:28,650 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting a813e349af604e65b12cf9e052645cb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733617347680 2024-12-08T00:22:28,650 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting e61f4e8185c7489ca0d584530cf5dfac, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733617347680 2024-12-08T00:22:28,655 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:28,656 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#629 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:28,657 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208a92ba1bfb15247fd8972ea707f0c9b22_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:28,657 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/224f7e500c49461abda316f8f2985602 is 50, key is test_row_0/B:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:28,658 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208a92ba1bfb15247fd8972ea707f0c9b22_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:28,659 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a92ba1bfb15247fd8972ea707f0c9b22_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:28,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742572_1748 (size=12983) 2024-12-08T00:22:28,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742573_1749 (size=4469) 2024-12-08T00:22:28,663 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#630 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:28,664 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c25751b8aa0a45a88e9f681eb99f2649 is 175, key is test_row_0/A:col10/1733617347787/Put/seqid=0 2024-12-08T00:22:28,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742574_1750 (size=31937) 2024-12-08T00:22:28,679 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c25751b8aa0a45a88e9f681eb99f2649 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649 2024-12-08T00:22:28,683 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into c25751b8aa0a45a88e9f681eb99f2649(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:28,683 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:28,683 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617348648; duration=0sec 2024-12-08T00:22:28,683 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:28,683 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:28,683 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T00:22:28,684 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T00:22:28,684 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T00:22:28,684 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
because compaction request was cancelled 2024-12-08T00:22:28,684 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:28,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:28,734 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086b1e90c657274273919ed4b344b14848_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617347814/Put/seqid=0 2024-12-08T00:22:28,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742575_1751 (size=12454) 2024-12-08T00:22:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:28,761 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086b1e90c657274273919ed4b344b14848_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086b1e90c657274273919ed4b344b14848_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:28,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6316da2d6cb944df900a2e83ba14168d, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:28,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6316da2d6cb944df900a2e83ba14168d is 175, key is test_row_0/A:col10/1733617347814/Put/seqid=0 2024-12-08T00:22:28,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742576_1752 (size=31255) 2024-12-08T00:22:28,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:28,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:28,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:28,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617408941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617408943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:28,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:28,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617408944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617409045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617409046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617409047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,066 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/224f7e500c49461abda316f8f2985602 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/224f7e500c49461abda316f8f2985602 2024-12-08T00:22:29,070 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 224f7e500c49461abda316f8f2985602(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:29,070 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:29,070 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617348648; duration=0sec 2024-12-08T00:22:29,070 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:29,070 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:29,167 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6316da2d6cb944df900a2e83ba14168d 2024-12-08T00:22:29,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1318f2a27a47473086bcb380fff179a3 is 50, key is test_row_0/B:col10/1733617347814/Put/seqid=0 2024-12-08T00:22:29,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742577_1753 (size=12301) 2024-12-08T00:22:29,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617409247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617409249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617409250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617409551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617409553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:29,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617409554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:29,577 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1318f2a27a47473086bcb380fff179a3 2024-12-08T00:22:29,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b87959ce3a954d999811cbaf4ad45c67 is 50, key is test_row_0/C:col10/1733617347814/Put/seqid=0 2024-12-08T00:22:29,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742578_1754 (size=12301) 2024-12-08T00:22:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:29,987 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b87959ce3a954d999811cbaf4ad45c67 2024-12-08T00:22:29,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/6316da2d6cb944df900a2e83ba14168d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d 2024-12-08T00:22:29,994 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d, entries=150, sequenceid=318, filesize=30.5 K 2024-12-08T00:22:29,995 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1318f2a27a47473086bcb380fff179a3 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3 2024-12-08T00:22:29,997 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T00:22:29,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b87959ce3a954d999811cbaf4ad45c67 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67 2024-12-08T00:22:30,001 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T00:22:30,001 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 8f0d77d608530d497fe4f44ffdd89312 in 1267ms, sequenceid=318, compaction requested=true 2024-12-08T00:22:30,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:30,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
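The record above closes the region-level flush that produced the sequenceid=318 files for stores A, B and C. The flush was driven from the master as a FlushTableProcedure (pid=183) with a per-region flush subprocedure (pid=184), as the following records show. For reference, a minimal client-side sketch of requesting the same table flush through the standard Admin API, assuming an HBase 2.x connection (the class name is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TriggerTableFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a flush of every region of the table. In this build the request is
      // executed on the master as a FlushTableProcedure with one flush subprocedure
      // per region, which is the pid=183 / pid=184 pair visible in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```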
2024-12-08T00:22:30,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-12-08T00:22:30,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-12-08T00:22:30,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-08T00:22:30,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1850 sec 2024-12-08T00:22:30,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 2.1880 sec 2024-12-08T00:22:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:30,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T00:22:30,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120863cb386307174a2a851fcce21a52317e_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742579_1755 (size=14994) 2024-12-08T00:22:30,068 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:30,071 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120863cb386307174a2a851fcce21a52317e_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120863cb386307174a2a851fcce21a52317e_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:30,072 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ed28093fa6784bc79f93f4a6df91f9b4, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ed28093fa6784bc79f93f4a6df91f9b4 is 175, key is test_row_0/A:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742580_1756 (size=39949) 2024-12-08T00:22:30,076 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ed28093fa6784bc79f93f4a6df91f9b4 2024-12-08T00:22:30,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/52e243dec21a41ae95097df1ff475ac8 is 50, key is test_row_0/B:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617410081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617410081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617410082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742581_1757 (size=12301) 2024-12-08T00:22:30,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/52e243dec21a41ae95097df1ff475ac8 2024-12-08T00:22:30,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9e631efa2e8246db8206f2b924274eb7 is 50, key is test_row_0/C:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742582_1758 (size=12301) 2024-12-08T00:22:30,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617410186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617410186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617410186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617410389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617410390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617410390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9e631efa2e8246db8206f2b924274eb7 2024-12-08T00:22:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/ed28093fa6784bc79f93f4a6df91f9b4 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4 2024-12-08T00:22:30,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4, entries=200, sequenceid=334, filesize=39.0 K 2024-12-08T00:22:30,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/52e243dec21a41ae95097df1ff475ac8 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8 2024-12-08T00:22:30,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T00:22:30,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/9e631efa2e8246db8206f2b924274eb7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7 2024-12-08T00:22:30,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T00:22:30,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 8f0d77d608530d497fe4f44ffdd89312 in 464ms, sequenceid=334, compaction requested=true 2024-12-08T00:22:30,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:30,521 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:30,521 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:30,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:30,522 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:30,522 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:30,522 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:30,522 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/224f7e500c49461abda316f8f2985602, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.7 K 2024-12-08T00:22:30,522 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:30,522 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:30,522 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:30,522 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=100.7 K 2024-12-08T00:22:30,522 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:30,522 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4] 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 224f7e500c49461abda316f8f2985602, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733617347680 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting c25751b8aa0a45a88e9f681eb99f2649, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733617347680 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1318f2a27a47473086bcb380fff179a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617347814 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6316da2d6cb944df900a2e83ba14168d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733617347814 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 52e243dec21a41ae95097df1ff475ac8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:30,523 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed28093fa6784bc79f93f4a6df91f9b4, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:30,529 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,529 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#637 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:30,530 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/98a191ecf15741eda2b55a9bc32d07b0 is 50, key is test_row_0/B:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,530 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208147eede6471341e798374257afef8eba_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,532 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208147eede6471341e798374257afef8eba_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,532 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208147eede6471341e798374257afef8eba_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742583_1759 (size=13085) 2024-12-08T00:22:30,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742584_1760 (size=4469) 2024-12-08T00:22:30,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:30,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:30,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:30,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081a4597b41e1749e682e803c694f037d4_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617350695/Put/seqid=0 2024-12-08T00:22:30,705 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742585_1761 (size=12454) 2024-12-08T00:22:30,706 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:30,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617410704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,709 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081a4597b41e1749e682e803c694f037d4_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081a4597b41e1749e682e803c694f037d4_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:30,709 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/dcb4928b652a4be583cf0bcccb1f38ae, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:30,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/dcb4928b652a4be583cf0bcccb1f38ae is 175, key is 
test_row_0/A:col10/1733617350695/Put/seqid=0 2024-12-08T00:22:30,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617410707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617410707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742586_1762 (size=31255) 2024-12-08T00:22:30,713 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/dcb4928b652a4be583cf0bcccb1f38ae 2024-12-08T00:22:30,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/aa69d594e026482d9cd1f9fc3d34b723 is 50, key is test_row_0/B:col10/1733617350695/Put/seqid=0 2024-12-08T00:22:30,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742587_1763 (size=12301) 2024-12-08T00:22:30,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617410808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617410812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:30,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617410812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:30,941 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/98a191ecf15741eda2b55a9bc32d07b0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/98a191ecf15741eda2b55a9bc32d07b0 2024-12-08T00:22:30,945 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 98a191ecf15741eda2b55a9bc32d07b0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:30,945 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:30,945 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617350521; duration=0sec 2024-12-08T00:22:30,945 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:30,945 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:30,945 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:30,946 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:30,946 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:30,946 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:30,947 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/d01712fae9904c39b51e2c8f6710b721, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=48.7 K 2024-12-08T00:22:30,947 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting d01712fae9904c39b51e2c8f6710b721, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733617346548 2024-12-08T00:22:30,947 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e8300cf7c59847f4a7dc111878682114, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733617347680 2024-12-08T00:22:30,947 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b87959ce3a954d999811cbaf4ad45c67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=318, earliestPutTs=1733617347814 2024-12-08T00:22:30,948 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e631efa2e8246db8206f2b924274eb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:30,955 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#638 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:30,956 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/f51250484e2e4e27815193e9d0b4e43f is 175, key is test_row_0/A:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,968 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#641 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:30,969 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/dcb9715f62c2424095303c6e10d404c0 is 50, key is test_row_0/C:col10/1733617348943/Put/seqid=0 2024-12-08T00:22:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742588_1764 (size=32039) 2024-12-08T00:22:30,979 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/f51250484e2e4e27815193e9d0b4e43f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f 2024-12-08T00:22:30,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742589_1765 (size=13051) 2024-12-08T00:22:30,984 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into f51250484e2e4e27815193e9d0b4e43f(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:30,984 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:30,984 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617350520; duration=0sec 2024-12-08T00:22:30,984 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:30,984 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:31,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617411012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617411015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617411016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/aa69d594e026482d9cd1f9fc3d34b723 2024-12-08T00:22:31,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b7503c6105fd42b2aa4c80f634892954 is 50, key is test_row_0/C:col10/1733617350695/Put/seqid=0 2024-12-08T00:22:31,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742590_1766 (size=12301) 2024-12-08T00:22:31,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b7503c6105fd42b2aa4c80f634892954 2024-12-08T00:22:31,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/dcb4928b652a4be583cf0bcccb1f38ae as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae 2024-12-08T00:22:31,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae, entries=150, sequenceid=358, filesize=30.5 K 2024-12-08T00:22:31,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/aa69d594e026482d9cd1f9fc3d34b723 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723 2024-12-08T00:22:31,145 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723, entries=150, sequenceid=358, filesize=12.0 K 2024-12-08T00:22:31,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/b7503c6105fd42b2aa4c80f634892954 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954 2024-12-08T00:22:31,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954, entries=150, sequenceid=358, filesize=12.0 K 2024-12-08T00:22:31,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8f0d77d608530d497fe4f44ffdd89312 in 457ms, sequenceid=358, compaction requested=false 2024-12-08T00:22:31,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:31,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:31,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:31,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084b8a2a7766ec452d8255a9b35dc42c76_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742591_1767 (size=14994) 2024-12-08T00:22:31,330 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:31,333 INFO [MemStoreFlusher.0 {}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084b8a2a7766ec452d8255a9b35dc42c76_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084b8a2a7766ec452d8255a9b35dc42c76_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:31,334 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/82f2d1a4855643b4865cf88ef7bf562c, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:31,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/82f2d1a4855643b4865cf88ef7bf562c is 175, key is test_row_0/A:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742592_1768 (size=39949) 2024-12-08T00:22:31,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617411348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617411351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617411351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,386 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/dcb9715f62c2424095303c6e10d404c0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dcb9715f62c2424095303c6e10d404c0 2024-12-08T00:22:31,390 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into dcb9715f62c2424095303c6e10d404c0(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:31,390 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:31,390 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=12, startTime=1733617350521; duration=0sec 2024-12-08T00:22:31,390 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:31,390 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:31,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617411452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617411455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617411455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617411656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617411657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617411658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,745 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/82f2d1a4855643b4865cf88ef7bf562c 2024-12-08T00:22:31,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/fd94d9216e7d4c8382934197a2efdb71 is 50, key is test_row_0/B:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742593_1769 (size=12301) 2024-12-08T00:22:31,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/fd94d9216e7d4c8382934197a2efdb71 2024-12-08T00:22:31,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3867431e5b764d308a959c3a122990bd is 50, key is test_row_0/C:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742594_1770 (size=12301) 2024-12-08T00:22:31,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3867431e5b764d308a959c3a122990bd 2024-12-08T00:22:31,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/82f2d1a4855643b4865cf88ef7bf562c as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c 2024-12-08T00:22:31,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c, entries=200, sequenceid=373, filesize=39.0 K 2024-12-08T00:22:31,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/fd94d9216e7d4c8382934197a2efdb71 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71 2024-12-08T00:22:31,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:22:31,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3867431e5b764d308a959c3a122990bd as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd 2024-12-08T00:22:31,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd, entries=150, sequenceid=373, filesize=12.0 K 2024-12-08T00:22:31,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 8f0d77d608530d497fe4f44ffdd89312 in 461ms, sequenceid=373, compaction requested=true 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:31,780 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:31,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:31,780 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:31,781 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:31,781 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:31,781 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:31,781 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/98a191ecf15741eda2b55a9bc32d07b0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.8 K 2024-12-08T00:22:31,781 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:31,781 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:31,781 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:31,781 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=100.8 K 2024-12-08T00:22:31,781 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:31,782 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c] 2024-12-08T00:22:31,782 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 98a191ecf15741eda2b55a9bc32d07b0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:31,782 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting f51250484e2e4e27815193e9d0b4e43f, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:31,782 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting aa69d594e026482d9cd1f9fc3d34b723, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733617350080 2024-12-08T00:22:31,782 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcb4928b652a4be583cf0bcccb1f38ae, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733617350080 2024-12-08T00:22:31,783 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting fd94d9216e7d4c8382934197a2efdb71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:31,783 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82f2d1a4855643b4865cf88ef7bf562c, keycount=200, bloomtype=ROW, size=39.0 
K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:31,790 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:31,791 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#646 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:31,791 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/4df6ba8c9298476da6962301ae2b1785 is 50, key is test_row_0/B:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,791 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412080220e8e38fe54f339a1d21fd9b827cad_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:31,794 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412080220e8e38fe54f339a1d21fd9b827cad_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:31,794 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080220e8e38fe54f339a1d21fd9b827cad_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:31,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742596_1772 (size=4469) 2024-12-08T00:22:31,803 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#647 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:31,804 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/48088d60396f4b2ba2884a96f634c225 is 175, key is test_row_0/A:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742595_1771 (size=13187) 2024-12-08T00:22:31,809 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/4df6ba8c9298476da6962301ae2b1785 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/4df6ba8c9298476da6962301ae2b1785 2024-12-08T00:22:31,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742597_1773 (size=32141) 2024-12-08T00:22:31,814 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 4df6ba8c9298476da6962301ae2b1785(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:31,814 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:31,814 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=13, startTime=1733617351780; duration=0sec 2024-12-08T00:22:31,815 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:31,815 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:31,815 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T00:22:31,815 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/48088d60396f4b2ba2884a96f634c225 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225 2024-12-08T00:22:31,816 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T00:22:31,816 DEBUG 
[RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:31,816 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:31,816 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dcb9715f62c2424095303c6e10d404c0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=36.8 K 2024-12-08T00:22:31,816 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting dcb9715f62c2424095303c6e10d404c0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733617348942 2024-12-08T00:22:31,817 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting b7503c6105fd42b2aa4c80f634892954, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733617350080 2024-12-08T00:22:31,817 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3867431e5b764d308a959c3a122990bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:31,820 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into 48088d60396f4b2ba2884a96f634c225(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:31,820 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:31,820 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=13, startTime=1733617351780; duration=0sec 2024-12-08T00:22:31,820 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:31,820 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:31,825 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#648 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:31,825 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/e96ab5bab97f40f99579fe459feb6abc is 50, key is test_row_0/C:col10/1733617350699/Put/seqid=0 2024-12-08T00:22:31,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742598_1774 (size=13153) 2024-12-08T00:22:31,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T00:22:31,922 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-08T00:22:31,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-12-08T00:22:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:31,924 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:31,925 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:31,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:31,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:31,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:31,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:31,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ca49dd4cfd8e4eebb13a2f3f772642f7_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617351962/Put/seqid=0 2024-12-08T00:22:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617411968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617411969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617411970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:31,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742599_1775 (size=14994) 2024-12-08T00:22:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:32,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617412071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617412072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617412072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:32,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
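A note on the repeated RegionTooBusyException entries above: while MemStoreFlusher.0 is still writing the flush out, HRegion.checkResources rejects incoming Mutate calls once the region's memstore is over its blocking limit (512.0 K here), and each rejected callId is logged by the handler. From a client's point of view these are transient, retryable failures. Below is a minimal client-side sketch of a put with explicit backoff on RegionTooBusyException; it is illustrative only (the HBase client already retries such failures internally and is not shown in this log), and the row, family and qualifier are modeled on the keys visible above, not taken from the test source.

    // Illustrative sketch only, not code from TestAcidGuarantees: retry a put that
    // may be rejected with RegionTooBusyException while the region's memstore is
    // over its blocking limit. The HBase client also retries this on its own.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                       // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);     // region over memstore limit, back off
              backoffMs *= 2;              // exponential backoff before retrying
            }
          }
        }
      }
    }

Backing off rather than immediately re-sending gives the in-flight flush (sequenceid=402 later in this log) time to bring the memstore back under the limit.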
2024-12-08T00:22:32,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:32,232 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:32,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,233 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/e96ab5bab97f40f99579fe459feb6abc as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e96ab5bab97f40f99579fe459feb6abc 2024-12-08T00:22:32,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,239 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into e96ab5bab97f40f99579fe459feb6abc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:32,239 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:32,239 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=13, startTime=1733617351780; duration=0sec 2024-12-08T00:22:32,239 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:32,239 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:32,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617412275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617412276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617412276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:32,384 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ca49dd4cfd8e4eebb13a2f3f772642f7_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ca49dd4cfd8e4eebb13a2f3f772642f7_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:32,384 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/712caaa7d8f84a68bbc63ec6ef19cd1a, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:32,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/712caaa7d8f84a68bbc63ec6ef19cd1a is 175, key is test_row_0/A:col10/1733617351962/Put/seqid=0 2024-12-08T00:22:32,385 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
as already flushing 2024-12-08T00:22:32,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742600_1776 (size=39949) 2024-12-08T00:22:32,389 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=402, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/712caaa7d8f84a68bbc63ec6ef19cd1a 2024-12-08T00:22:32,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/c4d456ffb9e34315b5dfbe6c34329dc2 is 50, key is test_row_0/B:col10/1733617351962/Put/seqid=0 2024-12-08T00:22:32,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742601_1777 (size=12301) 2024-12-08T00:22:32,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:32,538 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:32,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
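The pid=186 failures above and below are the region server telling the master that it cannot honor the FlushRegionCallable yet, because MemStoreFlusher.0 already has a flush of 8f0d77d608530d497fe4f44ffdd89312 in progress; the master keeps re-dispatching the procedure until it succeeds. In recent HBase versions such table flushes are driven through master procedures like these, and they are typically requested through the Admin API as in the hedged sketch below (whether this particular test run issues them that way is not visible in the log).

    // Hedged sketch: how a table flush is usually requested from a client.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous flush of every region of the table.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }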
2024-12-08T00:22:32,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617412577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617412579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:32,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617412580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:32,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:32,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/c4d456ffb9e34315b5dfbe6c34329dc2 2024-12-08T00:22:32,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/7adab3c570f14ad68f4dc5f75589ab2d is 50, key is test_row_0/C:col10/1733617351962/Put/seqid=0 2024-12-08T00:22:32,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742602_1778 (size=12301) 2024-12-08T00:22:32,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/7adab3c570f14ad68f4dc5f75589ab2d 2024-12-08T00:22:32,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/712caaa7d8f84a68bbc63ec6ef19cd1a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a 2024-12-08T00:22:32,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a, entries=200, sequenceid=402, filesize=39.0 K 2024-12-08T00:22:32,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/c4d456ffb9e34315b5dfbe6c34329dc2 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2 2024-12-08T00:22:32,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2, entries=150, sequenceid=402, filesize=12.0 K 
2024-12-08T00:22:32,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/7adab3c570f14ad68f4dc5f75589ab2d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d 2024-12-08T00:22:32,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d, entries=150, sequenceid=402, filesize=12.0 K 2024-12-08T00:22:32,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8f0d77d608530d497fe4f44ffdd89312 in 866ms, sequenceid=402, compaction requested=false 2024-12-08T00:22:32,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:32,843 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:32,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
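For context on the 512.0 K figure in the RegionTooBusyException messages: HRegion blocks updates once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and checkResources reports that product as the memstore limit. The values in the sketch below are assumptions chosen to reproduce the 512 KB threshold seen in this log; the test's actual configuration is not shown here.

    // Illustrative configuration sketch; values are assumptions matching the
    // "Over memstore limit=512.0 K" messages, not settings read from the test.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is requested.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are blocked once the memstore reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288
      }
    }

The later rejections at 00:22:33,136 show the same limit being reached again while the follow-up flush (pid=186, started at 00:22:32,844) is still running.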
2024-12-08T00:22:32,844 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:32,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:32,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f0f2531aaec44217b3b0a223cda7bf36_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617351969/Put/seqid=0 2024-12-08T00:22:32,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742603_1779 (size=12454) 2024-12-08T00:22:32,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:32,857 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f0f2531aaec44217b3b0a223cda7bf36_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f0f2531aaec44217b3b0a223cda7bf36_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:32,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4bde131c73f547ea8635ad83bfbdc44c, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:32,859 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4bde131c73f547ea8635ad83bfbdc44c is 175, key is test_row_0/A:col10/1733617351969/Put/seqid=0 2024-12-08T00:22:32,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742604_1780 (size=31255) 2024-12-08T00:22:33,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:33,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:33,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617413134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617413134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617413136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50200 deadline: 1733617413230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,232 DEBUG [Thread-2905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:33,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617413237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617413238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617413239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,262 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=413, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4bde131c73f547ea8635ad83bfbdc44c 2024-12-08T00:22:33,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1698624a90bb4beda645c7b182c7707a is 50, key is test_row_0/B:col10/1733617351969/Put/seqid=0 2024-12-08T00:22:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742605_1781 (size=12301) 2024-12-08T00:22:33,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50172 deadline: 1733617413298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,300 DEBUG [Thread-2901 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18243 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., hostname=017dd09fb407,36703,1733617179335, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:22:33,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617413440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617413441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617413442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,682 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1698624a90bb4beda645c7b182c7707a 2024-12-08T00:22:33,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3f78f166cf8249338b7d4a72af0ce3a0 is 50, key is test_row_0/C:col10/1733617351969/Put/seqid=0 2024-12-08T00:22:33,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742606_1782 (size=12301) 2024-12-08T00:22:33,691 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3f78f166cf8249338b7d4a72af0ce3a0 2024-12-08T00:22:33,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4bde131c73f547ea8635ad83bfbdc44c as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c 2024-12-08T00:22:33,697 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c, entries=150, sequenceid=413, filesize=30.5 K 2024-12-08T00:22:33,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1698624a90bb4beda645c7b182c7707a as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a 2024-12-08T00:22:33,707 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a, entries=150, sequenceid=413, filesize=12.0 K 2024-12-08T00:22:33,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/3f78f166cf8249338b7d4a72af0ce3a0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0 2024-12-08T00:22:33,711 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0, entries=150, sequenceid=413, filesize=12.0 K 2024-12-08T00:22:33,712 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 8f0d77d608530d497fe4f44ffdd89312 in 868ms, sequenceid=413, compaction requested=true 2024-12-08T00:22:33,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:33,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:33,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-08T00:22:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-08T00:22:33,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-08T00:22:33,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7880 sec 2024-12-08T00:22:33,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 1.7920 sec 2024-12-08T00:22:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:33,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:33,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:33,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120890e362059d594b3c88d866345f97140d_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:33,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617413757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617413757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617413757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742607_1783 (size=14994) 2024-12-08T00:22:33,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617413861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617413861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617413861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-08T00:22:34,028 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-12-08T00:22:34,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T00:22:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-12-08T00:22:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:34,034 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:22:34,035 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:22:34,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:22:34,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617414065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617414065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617414065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:34,167 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:34,171 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120890e362059d594b3c88d866345f97140d_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120890e362059d594b3c88d866345f97140d_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:34,171 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4d0609697719490ebbfba985f98ee142, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:34,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4d0609697719490ebbfba985f98ee142 is 175, key is test_row_0/A:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:34,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added 
to blk_1073742608_1784 (size=39949) 2024-12-08T00:22:34,176 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=441, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4d0609697719490ebbfba985f98ee142 2024-12-08T00:22:34,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1d4bc306b02c4f03907aff03ef9c3c5e is 50, key is test_row_0/B:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:34,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742609_1785 (size=12301) 2024-12-08T00:22:34,187 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
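The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources, which rejects incoming Mutate RPCs while the region's memstore sits above its blocking threshold; the writer is expected to back off and retry, which matches the same connections (172.17.0.2:50104, 50144, 50150) coming back with steadily increasing callIds. In stock HBase the blocking threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" figure points at a deliberately small flush size in this test's configuration rather than a production-sized one. A minimal sketch of a configuration that would reproduce that limit; the 128 KB flush size and multiplier of 4 are assumptions chosen only to arrive at 512 K, not values read from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (assumed value, for illustration).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows past flush.size * multiplier (4 is the default).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" seen in the log above.
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
    }
}

Once the in-flight flush drains the memstore below that limit, the same handlers go back to accepting Mutate calls, which is the pattern the rest of this stretch of the log keeps cycling through.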
2024-12-08T00:22:34,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:34,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617414367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617414369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617414369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:34,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
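Procedure 187/188 in this stretch is the table flush requested by the client at 00:22:34,030 ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"): the master stores a FlushTableProcedure (pid=187), which spawns a FlushRegionProcedure (pid=188) and dispatches it to the region server. Because MemStoreFlusher.0 is already flushing region 8f0d77d608530d497fe4f44ffdd89312, FlushRegionCallable keeps answering "Unable to complete flush ... as already flushing", and the master logs "Remote procedure failed, pid=188" and re-dispatches it until the in-progress flush completes. On the client side the whole exchange boils down to a single Admin call; a minimal sketch, with connection setup assumed rather than taken from the test harness:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Request a flush of every region of the table; the master runs this as a
            // flush procedure and the call returns once that procedure completes,
            // which is the "Operation: FLUSH ... completed" pattern seen for procId 185 above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}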
2024-12-08T00:22:34,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T00:22:34,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1d4bc306b02c4f03907aff03ef9c3c5e 2024-12-08T00:22:34,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/653826ddf9ca4af9b479e0bef425ad28 is 50, key is test_row_0/C:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:34,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742610_1786 (size=12301) 2024-12-08T00:22:34,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:34,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
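The flush that procedure 188 keeps colliding with is also going through the MOB path for family A: at 00:22:34,171 HMobStore renames the flushed mob file into .../mobdir/data/default/TestAcidGuarantees/.../A/, and DefaultMobStoreFlusher reports the A store flushed at sequenceid=441, while families B and C flush through the plain DefaultStoreFlusher (55.91 KB each at the same sequenceid). That split happens because A is declared as a MOB-enabled column family in the table descriptor. A minimal sketch of such a declaration; the 10-byte threshold and the plain B/C families are illustrative assumptions, not the exact descriptor used by this test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Values in A larger than the MOB threshold are written to separate mob files
            // under /mobdir, which is why the flush above renames a mob file for A.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(10L)   // assumed threshold, for illustration only
                .build())
            // B and C are ordinary families and flush through DefaultStoreFlusher.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
        System.out.println(desc);
    }
}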
2024-12-08T00:22:34,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50104 deadline: 1733617414871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50144 deadline: 1733617414873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T00:22:34,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36703 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50150 deadline: 1733617414874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:34,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:34,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. as already flushing 2024-12-08T00:22:34,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:34,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:22:34,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/653826ddf9ca4af9b479e0bef425ad28 2024-12-08T00:22:34,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/4d0609697719490ebbfba985f98ee142 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142 2024-12-08T00:22:35,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142, entries=200, sequenceid=441, filesize=39.0 K 2024-12-08T00:22:35,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/1d4bc306b02c4f03907aff03ef9c3c5e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e 2024-12-08T00:22:35,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e, entries=150, sequenceid=441, filesize=12.0 K 2024-12-08T00:22:35,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/653826ddf9ca4af9b479e0bef425ad28 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28 2024-12-08T00:22:35,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28, entries=150, sequenceid=441, filesize=12.0 K 2024-12-08T00:22:35,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 8f0d77d608530d497fe4f44ffdd89312 in 1264ms, sequenceid=441, compaction requested=true 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:35,010 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8f0d77d608530d497fe4f44ffdd89312:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T00:22:35,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:35,010 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:35,011 DEBUG [Thread-2920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68c2838a to 127.0.0.1:62287 2024-12-08T00:22:35,011 DEBUG [Thread-2912 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:62287 2024-12-08T00:22:35,011 DEBUG [Thread-2912 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,011 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:35,011 DEBUG [Thread-2920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,011 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/B is initiating minor compaction (all files) 2024-12-08T00:22:35,011 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/B in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:35,011 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/4df6ba8c9298476da6962301ae2b1785, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=48.9 K 2024-12-08T00:22:35,012 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:35,012 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/A is initiating minor compaction (all files) 2024-12-08T00:22:35,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/A in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:35,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=139.9 K 2024-12-08T00:22:35,012 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:35,012 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
files: [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142] 2024-12-08T00:22:35,012 DEBUG [Thread-2916 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:62287 2024-12-08T00:22:35,012 DEBUG [Thread-2916 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,012 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48088d60396f4b2ba2884a96f634c225, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:35,012 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 4df6ba8c9298476da6962301ae2b1785, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting c4d456ffb9e34315b5dfbe6c34329dc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733617351347 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 712caaa7d8f84a68bbc63ec6ef19cd1a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733617351347 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1698624a90bb4beda645c7b182c7707a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733617351964 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bde131c73f547ea8635ad83bfbdc44c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733617351964 2024-12-08T00:22:35,013 DEBUG [Thread-2918 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cfdf76c to 127.0.0.1:62287 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d4bc306b02c4f03907aff03ef9c3c5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733617353132 2024-12-08T00:22:35,013 DEBUG [Thread-2914 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:62287 2024-12-08T00:22:35,013 DEBUG [Thread-2918 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,013 DEBUG [Thread-2914 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,013 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d0609697719490ebbfba985f98ee142, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=441, 
earliestPutTs=1733617353132 2024-12-08T00:22:35,019 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:35,019 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#B#compaction#658 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:35,020 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/7ae495b0be5446329a17ab9fdcbc6110 is 50, key is test_row_0/B:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:35,020 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412087033727f19654670b444851dc16a8010_8f0d77d608530d497fe4f44ffdd89312 store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:35,022 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412087033727f19654670b444851dc16a8010_8f0d77d608530d497fe4f44ffdd89312, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:35,022 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087033727f19654670b444851dc16a8010_8f0d77d608530d497fe4f44ffdd89312 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:35,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742611_1787 (size=13323) 2024-12-08T00:22:35,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742612_1788 (size=4469) 2024-12-08T00:22:35,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:35,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36703 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-08T00:22:35,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:35,103 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-08T00:22:35,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:35,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:35,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:35,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:35,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:35,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:35,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120835e683ac65a3488fa125ab804ad42c4a_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617353748/Put/seqid=0 2024-12-08T00:22:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742613_1789 (size=12454) 2024-12-08T00:22:35,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:35,425 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#A#compaction#659 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:35,426 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c373bb713ea64e448b009acf880aff31 is 175, key is test_row_0/A:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:35,427 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/7ae495b0be5446329a17ab9fdcbc6110 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7ae495b0be5446329a17ab9fdcbc6110 2024-12-08T00:22:35,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742614_1790 (size=32277) 2024-12-08T00:22:35,431 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/B of 8f0d77d608530d497fe4f44ffdd89312 into 7ae495b0be5446329a17ab9fdcbc6110(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:35,431 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:35,431 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/B, priority=12, startTime=1733617355010; duration=0sec 2024-12-08T00:22:35,431 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T00:22:35,431 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:B 2024-12-08T00:22:35,431 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T00:22:35,432 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T00:22:35,432 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1540): 8f0d77d608530d497fe4f44ffdd89312/C is initiating minor compaction (all files) 2024-12-08T00:22:35,432 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8f0d77d608530d497fe4f44ffdd89312/C in TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:35,432 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e96ab5bab97f40f99579fe459feb6abc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28] into tmpdir=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp, totalSize=48.9 K 2024-12-08T00:22:35,432 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting e96ab5bab97f40f99579fe459feb6abc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733617350699 2024-12-08T00:22:35,433 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 7adab3c570f14ad68f4dc5f75589ab2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733617351347 2024-12-08T00:22:35,433 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f78f166cf8249338b7d4a72af0ce3a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1733617351964 2024-12-08T00:22:35,433 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] compactions.Compactor(224): Compacting 653826ddf9ca4af9b479e0bef425ad28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733617353132 2024-12-08T00:22:35,438 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8f0d77d608530d497fe4f44ffdd89312#C#compaction#661 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T00:22:35,439 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/198ec73927c641ff9fd0e67a5c64e81f is 50, key is test_row_0/C:col10/1733617353746/Put/seqid=0 2024-12-08T00:22:35,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742615_1791 (size=13289) 2024-12-08T00:22:35,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:35,515 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120835e683ac65a3488fa125ab804ad42c4a_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835e683ac65a3488fa125ab804ad42c4a_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:35,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c487bc1721984b3d9cc55cd21a181401, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:35,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c487bc1721984b3d9cc55cd21a181401 is 175, key is test_row_0/A:col10/1733617353748/Put/seqid=0 2024-12-08T00:22:35,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742616_1792 (size=31255) 2024-12-08T00:22:35,833 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c373bb713ea64e448b009acf880aff31 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c373bb713ea64e448b009acf880aff31 2024-12-08T00:22:35,836 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/A of 8f0d77d608530d497fe4f44ffdd89312 into c373bb713ea64e448b009acf880aff31(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T00:22:35,836 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:35,837 INFO [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/A, priority=12, startTime=1733617355010; duration=0sec 2024-12-08T00:22:35,837 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:35,837 DEBUG [RS:0;017dd09fb407:36703-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:A 2024-12-08T00:22:35,845 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/198ec73927c641ff9fd0e67a5c64e81f as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/198ec73927c641ff9fd0e67a5c64e81f 2024-12-08T00:22:35,848 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8f0d77d608530d497fe4f44ffdd89312/C of 8f0d77d608530d497fe4f44ffdd89312 into 198ec73927c641ff9fd0e67a5c64e81f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T00:22:35,848 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:35,848 INFO [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312., storeName=8f0d77d608530d497fe4f44ffdd89312/C, priority=12, startTime=1733617355010; duration=0sec 2024-12-08T00:22:35,848 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T00:22:35,848 DEBUG [RS:0;017dd09fb407:36703-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8f0d77d608530d497fe4f44ffdd89312:C 2024-12-08T00:22:35,878 DEBUG [Thread-2907 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11030ef5 to 127.0.0.1:62287 2024-12-08T00:22:35,878 DEBUG [Thread-2907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36703 {}] regionserver.HRegion(8581): Flush requested on 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:35,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
as already flushing 2024-12-08T00:22:35,882 DEBUG [Thread-2909 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69abefea to 127.0.0.1:62287 2024-12-08T00:22:35,882 DEBUG [Thread-2909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,883 DEBUG [Thread-2903 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bc9c3e to 127.0.0.1:62287 2024-12-08T00:22:35,883 DEBUG [Thread-2903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:35,919 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=450, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c487bc1721984b3d9cc55cd21a181401 2024-12-08T00:22:35,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/0049b1d685674d1986de323dd5a39af6 is 50, key is test_row_0/B:col10/1733617353748/Put/seqid=0 2024-12-08T00:22:35,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742617_1793 (size=12301) 2024-12-08T00:22:36,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:36,327 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/0049b1d685674d1986de323dd5a39af6 2024-12-08T00:22:36,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/0a1c12a1223e4ac284212cd47f607b8e is 50, key is test_row_0/C:col10/1733617353748/Put/seqid=0 2024-12-08T00:22:36,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742618_1794 (size=12301) 2024-12-08T00:22:36,736 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/0a1c12a1223e4ac284212cd47f607b8e 2024-12-08T00:22:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/c487bc1721984b3d9cc55cd21a181401 as 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c487bc1721984b3d9cc55cd21a181401 2024-12-08T00:22:36,741 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c487bc1721984b3d9cc55cd21a181401, entries=150, sequenceid=450, filesize=30.5 K 2024-12-08T00:22:36,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/0049b1d685674d1986de323dd5a39af6 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/0049b1d685674d1986de323dd5a39af6 2024-12-08T00:22:36,745 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/0049b1d685674d1986de323dd5a39af6, entries=150, sequenceid=450, filesize=12.0 K 2024-12-08T00:22:36,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/0a1c12a1223e4ac284212cd47f607b8e as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/0a1c12a1223e4ac284212cd47f607b8e 2024-12-08T00:22:36,748 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/0a1c12a1223e4ac284212cd47f607b8e, entries=150, sequenceid=450, filesize=12.0 K 2024-12-08T00:22:36,748 INFO [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=20.13 KB/20610 for 8f0d77d608530d497fe4f44ffdd89312 in 1645ms, sequenceid=450, compaction requested=false 2024-12-08T00:22:36,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:36,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:36,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/017dd09fb407:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-12-08T00:22:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-12-08T00:22:36,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-12-08T00:22:36,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7140 sec 2024-12-08T00:22:36,751 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 2.7200 sec 2024-12-08T00:22:37,705 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T00:22:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T00:22:38,139 INFO [Thread-2911 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-12-08T00:22:43,249 DEBUG [Thread-2905 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7181df3b to 127.0.0.1:62287 2024-12-08T00:22:43,250 DEBUG [Thread-2905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:43,329 DEBUG [Thread-2901 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ac53e79 to 127.0.0.1:62287 2024-12-08T00:22:43,329 DEBUG [Thread-2901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 4 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 131 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 3 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 109 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6565 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6548 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6494 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6567 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6581 2024-12-08T00:22:43,329 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T00:22:43,329 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:22:43,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5765d46a to 127.0.0.1:62287 2024-12-08T00:22:43,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:43,330 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T00:22:43,330 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T00:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:43,332 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617363332"}]},"ts":"1733617363332"} 2024-12-08T00:22:43,333 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T00:22:43,335 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T00:22:43,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T00:22:43,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, UNASSIGN}] 2024-12-08T00:22:43,336 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, UNASSIGN 2024-12-08T00:22:43,337 INFO [PEWorker-4 
{}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=CLOSING, regionLocation=017dd09fb407,36703,1733617179335 2024-12-08T00:22:43,337 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T00:22:43,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; CloseRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335}] 2024-12-08T00:22:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:43,488 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 017dd09fb407,36703,1733617179335 2024-12-08T00:22:43,489 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(124): Close 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1681): Closing 8f0d77d608530d497fe4f44ffdd89312, disabling compactions & flushes 2024-12-08T00:22:43,489 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. after waiting 0 ms 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
2024-12-08T00:22:43,489 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(2837): Flushing 8f0d77d608530d497fe4f44ffdd89312 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=A 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=B 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8f0d77d608530d497fe4f44ffdd89312, store=C 2024-12-08T00:22:43,489 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T00:22:43,494 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081d39e040d9744c80b2a4a8ff9602cc57_8f0d77d608530d497fe4f44ffdd89312 is 50, key is test_row_0/A:col10/1733617355877/Put/seqid=0 2024-12-08T00:22:43,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742619_1795 (size=12454) 2024-12-08T00:22:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:43,897 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:22:43,901 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081d39e040d9744c80b2a4a8ff9602cc57_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d39e040d9744c80b2a4a8ff9602cc57_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:43,901 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/d79ace7fd317424197e5833f574b2673, store: [table=TestAcidGuarantees family=A region=8f0d77d608530d497fe4f44ffdd89312] 2024-12-08T00:22:43,902 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/d79ace7fd317424197e5833f574b2673 is 175, key is test_row_0/A:col10/1733617355877/Put/seqid=0 2024-12-08T00:22:43,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742620_1796 (size=31255) 2024-12-08T00:22:43,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:44,305 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=461, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/d79ace7fd317424197e5833f574b2673 2024-12-08T00:22:44,310 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/f917753b193949139089c3994d18c3b0 is 50, key is test_row_0/B:col10/1733617355877/Put/seqid=0 2024-12-08T00:22:44,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742621_1797 (size=12301) 2024-12-08T00:22:44,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:44,713 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/f917753b193949139089c3994d18c3b0 2024-12-08T00:22:44,718 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/f491fe73c6c34e91a72710ebc6c6e2ea is 50, key is test_row_0/C:col10/1733617355877/Put/seqid=0 2024-12-08T00:22:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742622_1798 (size=12301) 2024-12-08T00:22:45,121 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=461 (bloomFilter=true), 
to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/f491fe73c6c34e91a72710ebc6c6e2ea 2024-12-08T00:22:45,124 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/A/d79ace7fd317424197e5833f574b2673 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/d79ace7fd317424197e5833f574b2673 2024-12-08T00:22:45,127 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/d79ace7fd317424197e5833f574b2673, entries=150, sequenceid=461, filesize=30.5 K 2024-12-08T00:22:45,127 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/B/f917753b193949139089c3994d18c3b0 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f917753b193949139089c3994d18c3b0 2024-12-08T00:22:45,129 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f917753b193949139089c3994d18c3b0, entries=150, sequenceid=461, filesize=12.0 K 2024-12-08T00:22:45,130 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/.tmp/C/f491fe73c6c34e91a72710ebc6c6e2ea as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/f491fe73c6c34e91a72710ebc6c6e2ea 2024-12-08T00:22:45,132 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/f491fe73c6c34e91a72710ebc6c6e2ea, entries=150, sequenceid=461, filesize=12.0 K 2024-12-08T00:22:45,132 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 8f0d77d608530d497fe4f44ffdd89312 in 1643ms, sequenceid=461, compaction requested=true 2024-12-08T00:22:45,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142] to archive 2024-12-08T00:22:45,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:22:45,134 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4529900537b94db9aab1f3fe9c306f41 2024-12-08T00:22:45,135 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4b916f9b67c04fe0b5996cacc56d2559 2024-12-08T00:22:45,136 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6eb167abc5724f94b3eee1e92baa904b 2024-12-08T00:22:45,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/af00e3df741e4e309fe8d7a4c74a0fe4 2024-12-08T00:22:45,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/b2ddab6d65e64e4cb413c203bda539f2 2024-12-08T00:22:45,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/cb923cf8ef0d4f718bdf2589cb72bb42 2024-12-08T00:22:45,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/02ae70a374fb43a8902ec9dcc02427ba 2024-12-08T00:22:45,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/492a037176ba4f7cb5971051353529c2 2024-12-08T00:22:45,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/fb6a8d43b1494f5b8ebc223751787019 2024-12-08T00:22:45,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/edee421daf5f4cb9a6e875e1c2c46d66 2024-12-08T00:22:45,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/5f8157212d3246d8b5ce58c89600539e 2024-12-08T00:22:45,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/7341f09dd64f40ea8a18beef4634e21a 2024-12-08T00:22:45,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/1771ce5741e8473b858cb2cd2c8fcc3f 2024-12-08T00:22:45,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ae4162dfed424dcf93250c43d47f7d79 2024-12-08T00:22:45,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/3b2616f3dd53468784f301426382871e 2024-12-08T00:22:45,145 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4803b19a6b8d4d8c800574d300e7205f 2024-12-08T00:22:45,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e4956cfedce84290bb376e027c27dbb7 2024-12-08T00:22:45,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e0dd830975bc4fb8aa7b0e93b7b292bb 2024-12-08T00:22:45,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/2cc49d398ea64872adf64bd04ebe200d 2024-12-08T00:22:45,148 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c25751b8aa0a45a88e9f681eb99f2649 2024-12-08T00:22:45,149 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/e61f4e8185c7489ca0d584530cf5dfac 2024-12-08T00:22:45,149 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/6316da2d6cb944df900a2e83ba14168d 2024-12-08T00:22:45,150 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/ed28093fa6784bc79f93f4a6df91f9b4 2024-12-08T00:22:45,151 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/f51250484e2e4e27815193e9d0b4e43f 2024-12-08T00:22:45,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/dcb4928b652a4be583cf0bcccb1f38ae 2024-12-08T00:22:45,152 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/82f2d1a4855643b4865cf88ef7bf562c 2024-12-08T00:22:45,153 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/48088d60396f4b2ba2884a96f634c225 2024-12-08T00:22:45,154 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/712caaa7d8f84a68bbc63ec6ef19cd1a 2024-12-08T00:22:45,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4bde131c73f547ea8635ad83bfbdc44c 2024-12-08T00:22:45,155 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/4d0609697719490ebbfba985f98ee142 2024-12-08T00:22:45,156 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ff281ce7e7734a03a21ccd1517c49d31, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/db5b8f0153c749039f167b727c99dd77, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/201ae3c5dd5c4c279046f18a0c1b1013, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f37f020a22de48c39cf5c43c4af8bbc4, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/95be750086e042698d6129488c2a2dac, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/224f7e500c49461abda316f8f2985602, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/98a191ecf15741eda2b55a9bc32d07b0, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/4df6ba8c9298476da6962301ae2b1785, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e] to archive 2024-12-08T00:22:45,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T00:22:45,158 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/149f6c5aad8c4ab3a2ec22771afe4276 2024-12-08T00:22:45,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7892de1ae6d04801ad3f643cf7af3bd7 2024-12-08T00:22:45,159 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ff281ce7e7734a03a21ccd1517c49d31 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ff281ce7e7734a03a21ccd1517c49d31 2024-12-08T00:22:45,160 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/d4844ba881b944ebae04b11c586d0788 2024-12-08T00:22:45,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/275da2b9671e41788c9145378fc97a45 2024-12-08T00:22:45,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/b4eeff693b944df9bd9390ad24b74cf0 2024-12-08T00:22:45,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/db5b8f0153c749039f167b727c99dd77 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/db5b8f0153c749039f167b727c99dd77 2024-12-08T00:22:45,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6cb5321d290647de85f7f67f79e2270a 2024-12-08T00:22:45,163 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/ae9f34b7df4345b7b814435331e9d580 2024-12-08T00:22:45,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a659ff4523a74bc8bead45885d0bf0ae 2024-12-08T00:22:45,164 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/201ae3c5dd5c4c279046f18a0c1b1013 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/201ae3c5dd5c4c279046f18a0c1b1013 2024-12-08T00:22:45,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6995796b8b55439e95e07f369ee5de1f 2024-12-08T00:22:45,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/5fc2846ab46f4905998be69e2c7eacce 2024-12-08T00:22:45,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f37f020a22de48c39cf5c43c4af8bbc4 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f37f020a22de48c39cf5c43c4af8bbc4 2024-12-08T00:22:45,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/dbddeb9674ad4f5fa12e24982174b2fc 2024-12-08T00:22:45,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/bf08613baed44a719083014303e545b6 2024-12-08T00:22:45,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/95be750086e042698d6129488c2a2dac to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/95be750086e042698d6129488c2a2dac 2024-12-08T00:22:45,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/8e9a12ed547e4e2399b2c32ba8aea478 2024-12-08T00:22:45,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/6968b09de5144176abf3f8e5bc1974bc 2024-12-08T00:22:45,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/224f7e500c49461abda316f8f2985602 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/224f7e500c49461abda316f8f2985602 2024-12-08T00:22:45,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/a813e349af604e65b12cf9e052645cb1 2024-12-08T00:22:45,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1318f2a27a47473086bcb380fff179a3 2024-12-08T00:22:45,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/98a191ecf15741eda2b55a9bc32d07b0 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/98a191ecf15741eda2b55a9bc32d07b0 2024-12-08T00:22:45,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/52e243dec21a41ae95097df1ff475ac8 2024-12-08T00:22:45,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/aa69d594e026482d9cd1f9fc3d34b723 2024-12-08T00:22:45,175 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/4df6ba8c9298476da6962301ae2b1785 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/4df6ba8c9298476da6962301ae2b1785 2024-12-08T00:22:45,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/fd94d9216e7d4c8382934197a2efdb71 2024-12-08T00:22:45,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/c4d456ffb9e34315b5dfbe6c34329dc2 2024-12-08T00:22:45,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1698624a90bb4beda645c7b182c7707a 2024-12-08T00:22:45,178 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/1d4bc306b02c4f03907aff03ef9c3c5e 2024-12-08T00:22:45,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c87ba21481ab48058066a5a1aa551511, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c41f768a6d5746e58f4a98dbb4016985, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1be7bb01eabe4bd58ee7d4a68e118ec2, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/93f6e066b27844a997844ff51e5daf4c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/d01712fae9904c39b51e2c8f6710b721, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dcb9715f62c2424095303c6e10d404c0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e96ab5bab97f40f99579fe459feb6abc, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28] to archive 2024-12-08T00:22:45,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T00:22:45,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b 2024-12-08T00:22:45,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1532fdc91e024c1190826ca5728b2759 2024-12-08T00:22:45,182 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c87ba21481ab48058066a5a1aa551511 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c87ba21481ab48058066a5a1aa551511 2024-12-08T00:22:45,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/cc6776a0821c4816afdcfa6520a04205 2024-12-08T00:22:45,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/765a576133f141749322e3ee69531ece 2024-12-08T00:22:45,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/bc1afc60aaad4871b19aa3152e8dbea3 2024-12-08T00:22:45,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c41f768a6d5746e58f4a98dbb4016985 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c41f768a6d5746e58f4a98dbb4016985 2024-12-08T00:22:45,185 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3e9d7db9c9c342179f7bfe52ef736d54 2024-12-08T00:22:45,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9c435a3cf6924972bd29422e36b3e1dc 2024-12-08T00:22:45,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c63711b4e40e482b85f7d3286ff28422 2024-12-08T00:22:45,187 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1be7bb01eabe4bd58ee7d4a68e118ec2 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/1be7bb01eabe4bd58ee7d4a68e118ec2 2024-12-08T00:22:45,187 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/227abefac4f349f989d09b86d9d272ee 2024-12-08T00:22:45,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/8901f5eb781a420792342a0f05f30bed 2024-12-08T00:22:45,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/93f6e066b27844a997844ff51e5daf4c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/93f6e066b27844a997844ff51e5daf4c 2024-12-08T00:22:45,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/c3228171656e46c5b654457ec2729410 2024-12-08T00:22:45,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/42e00882621447b394c8c350a878af9e 2024-12-08T00:22:45,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/fbd5c0053c43439084121ec5e367c58b 2024-12-08T00:22:45,192 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/d01712fae9904c39b51e2c8f6710b721 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/d01712fae9904c39b51e2c8f6710b721 2024-12-08T00:22:45,193 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dc48c177d5174a26bb7663979c9c018c 2024-12-08T00:22:45,194 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e8300cf7c59847f4a7dc111878682114 2024-12-08T00:22:45,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b87959ce3a954d999811cbaf4ad45c67 2024-12-08T00:22:45,195 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dcb9715f62c2424095303c6e10d404c0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/dcb9715f62c2424095303c6e10d404c0 2024-12-08T00:22:45,196 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/9e631efa2e8246db8206f2b924274eb7 2024-12-08T00:22:45,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/b7503c6105fd42b2aa4c80f634892954 2024-12-08T00:22:45,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e96ab5bab97f40f99579fe459feb6abc to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/e96ab5bab97f40f99579fe459feb6abc 2024-12-08T00:22:45,198 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3867431e5b764d308a959c3a122990bd 2024-12-08T00:22:45,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/7adab3c570f14ad68f4dc5f75589ab2d 2024-12-08T00:22:45,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/3f78f166cf8249338b7d4a72af0ce3a0 2024-12-08T00:22:45,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/653826ddf9ca4af9b479e0bef425ad28 2024-12-08T00:22:45,203 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits/464.seqid, newMaxSeqId=464, maxSeqId=4 2024-12-08T00:22:45,204 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312. 
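Editor's note: the long run of `backup.HFileArchiver(596): Archived from FileableStoreFile, <src> to <dst>` entries above moves each store file from the region's `data/` tree to the same relative path under `archive/`. Below is a minimal sketch of that path-mirroring move using the plain Hadoop FileSystem API; the class and helper names (`ArchiveSketch`, `archiveStoreFile`) are illustrative assumptions, not HBase's actual `HFileArchiver` code.

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: mirror a store file's path from <root>/data/... to <root>/archive/data/...
// and move it there, roughly what the "Archived from FileableStoreFile, <src> to <dst>"
// entries above record. Not the real org.apache.hadoop.hbase.backup.HFileArchiver.
public class ArchiveSketch {

  static Path toArchivePath(Path root, Path storeFile) {
    // relative part, e.g. data/default/TestAcidGuarantees/<region>/C/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(root.toUri().getPath().length() + 1);
    // prepend "archive/" so data/... becomes archive/data/...
    return new Path(root, "archive/" + relative);
  }

  static void archiveStoreFile(FileSystem fs, Path root, Path storeFile) throws IOException {
    Path target = toArchivePath(root, storeFile);
    fs.mkdirs(target.getParent());         // ensure archive/.../<family> exists
    if (!fs.rename(storeFile, target)) {   // on HDFS a rename is a metadata-only move
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // root taken from the paths in the log above
    Path root = new Path("hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3");
    FileSystem fs = FileSystem.get(URI.create(root.toString()), conf);
    archiveStoreFile(fs, root, new Path(root,
        "data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/6e038cb800be4b638348d8254525ae9b"));
  }
}
```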
2024-12-08T00:22:45,204 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1635): Region close journal for 8f0d77d608530d497fe4f44ffdd89312: 2024-12-08T00:22:45,205 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(170): Closed 8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,205 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=8f0d77d608530d497fe4f44ffdd89312, regionState=CLOSED 2024-12-08T00:22:45,207 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-08T00:22:45,207 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseRegionProcedure 8f0d77d608530d497fe4f44ffdd89312, server=017dd09fb407,36703,1733617179335 in 1.8690 sec 2024-12-08T00:22:45,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=191, resume processing ppid=190 2024-12-08T00:22:45,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, ppid=190, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8f0d77d608530d497fe4f44ffdd89312, UNASSIGN in 1.8710 sec 2024-12-08T00:22:45,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-12-08T00:22:45,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8730 sec 2024-12-08T00:22:45,210 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733617365210"}]},"ts":"1733617365210"} 2024-12-08T00:22:45,211 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T00:22:45,212 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T00:22:45,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8830 sec 2024-12-08T00:22:45,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-08T00:22:45,436 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-12-08T00:22:45,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T00:22:45,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] procedure2.ProcedureExecutor(1098): Stored pid=193, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,437 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=193, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-08T00:22:45,438 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=193, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,439 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,440 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C, FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits] 2024-12-08T00:22:45,442 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c373bb713ea64e448b009acf880aff31 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c373bb713ea64e448b009acf880aff31 2024-12-08T00:22:45,443 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c487bc1721984b3d9cc55cd21a181401 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/c487bc1721984b3d9cc55cd21a181401 2024-12-08T00:22:45,444 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/d79ace7fd317424197e5833f574b2673 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/A/d79ace7fd317424197e5833f574b2673 2024-12-08T00:22:45,446 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/0049b1d685674d1986de323dd5a39af6 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/0049b1d685674d1986de323dd5a39af6 2024-12-08T00:22:45,446 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7ae495b0be5446329a17ab9fdcbc6110 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/7ae495b0be5446329a17ab9fdcbc6110 
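Editor's note: the `Operation: DISABLE ... completed` and `delete TestAcidGuarantees` / `DeleteTableProcedure` entries above are the server-side trace of an ordinary client drop-table sequence. A short sketch of the client calls that trigger those procedures is below, using the standard HBase `Admin` API; the configuration source (classpath `hbase-site.xml`) is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client side of the DISABLE/DELETE procedures logged above.
// disableTable/deleteTable block until the master's DisableTableProcedure /
// DeleteTableProcedure finish (the "Checking to see if procedure is done pid=..." polling).
public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);  // "Operation: DISABLE ... completed" in the log
        }
        admin.deleteTable(tn);     // "Operation: DELETE ... completed" in the log
      }
    }
  }
}
```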
2024-12-08T00:22:45,447 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f917753b193949139089c3994d18c3b0 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/B/f917753b193949139089c3994d18c3b0 2024-12-08T00:22:45,449 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/0a1c12a1223e4ac284212cd47f607b8e to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/0a1c12a1223e4ac284212cd47f607b8e 2024-12-08T00:22:45,450 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/198ec73927c641ff9fd0e67a5c64e81f to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/198ec73927c641ff9fd0e67a5c64e81f 2024-12-08T00:22:45,451 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/f491fe73c6c34e91a72710ebc6c6e2ea to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/C/f491fe73c6c34e91a72710ebc6c6e2ea 2024-12-08T00:22:45,453 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits/464.seqid to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312/recovered.edits/464.seqid 2024-12-08T00:22:45,453 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/default/TestAcidGuarantees/8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,453 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T00:22:45,453 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:22:45,454 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T00:22:45,456 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081a4597b41e1749e682e803c694f037d4_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081a4597b41e1749e682e803c694f037d4_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,457 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d39e040d9744c80b2a4a8ff9602cc57_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081d39e040d9744c80b2a4a8ff9602cc57_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,458 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835e683ac65a3488fa125ab804ad42c4a_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120835e683ac65a3488fa125ab804ad42c4a_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,459 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208370f4c338ae2437aa42a8d087ffdc403_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208370f4c338ae2437aa42a8d087ffdc403_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,460 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083710b38ccf8a492c9511a2e90023977d_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083710b38ccf8a492c9511a2e90023977d_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,461 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208372afe1017f041f99602c6a49f0d10f4_8f0d77d608530d497fe4f44ffdd89312 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208372afe1017f041f99602c6a49f0d10f4_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,462 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083f855968f4254b549d7bf1a087d8faf5_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083f855968f4254b549d7bf1a087d8faf5_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,463 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208431bba8f40684b368dfbf9346deec52f_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208431bba8f40684b368dfbf9346deec52f_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,464 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084464477a9cbb44fb93e98705ed2abc23_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084464477a9cbb44fb93e98705ed2abc23_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,464 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084b8a2a7766ec452d8255a9b35dc42c76_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084b8a2a7766ec452d8255a9b35dc42c76_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,465 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120863cb386307174a2a851fcce21a52317e_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120863cb386307174a2a851fcce21a52317e_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,466 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086b1e90c657274273919ed4b344b14848_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086b1e90c657274273919ed4b344b14848_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,467 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120880144989d03740578fc18a21c70dd4ef_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120880144989d03740578fc18a21c70dd4ef_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,468 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120884a7a7fc24ca41ebac29b80fa72592b0_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120884a7a7fc24ca41ebac29b80fa72592b0_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,469 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088eb684d18d024ebfac5387b512915c3b_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412088eb684d18d024ebfac5387b512915c3b_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,469 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120890e362059d594b3c88d866345f97140d_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120890e362059d594b3c88d866345f97140d_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,470 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a072cbdaf20c4939acc0459074e40c70_8f0d77d608530d497fe4f44ffdd89312 to 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a072cbdaf20c4939acc0459074e40c70_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,471 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b34c3ee2055d4aaaa73c4c264e2636cd_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b34c3ee2055d4aaaa73c4c264e2636cd_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,472 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5e8fd16d0534ba590be449de36bd19e_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5e8fd16d0534ba590be449de36bd19e_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,473 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ca49dd4cfd8e4eebb13a2f3f772642f7_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ca49dd4cfd8e4eebb13a2f3f772642f7_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,474 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cc31f4fd0fa745f597b443b811ba062a_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208cc31f4fd0fa745f597b443b811ba062a_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,474 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208df468b0fd9904e79bac24f1cce28de32_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208df468b0fd9904e79bac24f1cce28de32_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,475 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208e43c6659ef9b4ed78d826346e75bae4b_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208e43c6659ef9b4ed78d826346e75bae4b_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,476 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f0f2531aaec44217b3b0a223cda7bf36_8f0d77d608530d497fe4f44ffdd89312 to hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f0f2531aaec44217b3b0a223cda7bf36_8f0d77d608530d497fe4f44ffdd89312 2024-12-08T00:22:45,477 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T00:22:45,478 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=193, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,480 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T00:22:45,482 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T00:22:45,482 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=193, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,482 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T00:22:45,483 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733617365482"}]},"ts":"9223372036854775807"} 2024-12-08T00:22:45,484 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T00:22:45,484 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8f0d77d608530d497fe4f44ffdd89312, NAME => 'TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T00:22:45,484 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
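Editor's note: the `MetaTableAccessor(2113): Delete {...}` entries above and below are ordinary Delete mutations applied to `hbase:meta`, keyed by the region name. The sketch below shows an equivalent mutation written with the public client API purely as an illustration of what the logged JSON corresponds to; in practice only the master's internal code (MetaTableAccessor) edits `hbase:meta`, and a client should never do this.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustration only: the 'Delete {"row":"TestAcidGuarantees,,...","families":{"info":[...]}}'
// entry above is a plain Delete against hbase:meta for the region's row. Do not run this
// against a real cluster; the master performs this step itself during DeleteTableProcedure.
public class MetaDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String regionRow = "TestAcidGuarantees,,1733617331911.8f0d77d608530d497fe4f44ffdd89312.";
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Delete d = new Delete(Bytes.toBytes(regionRow));
      d.addFamily(Bytes.toBytes("info"));  // drop all info:* columns for the region row
      meta.delete(d);
    }
  }
}
```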
2024-12-08T00:22:45,484 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733617365484"}]},"ts":"9223372036854775807"} 2024-12-08T00:22:45,485 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T00:22:45,487 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=193, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T00:22:45,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-12-08T00:22:45,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44717 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-12-08T00:22:45,539 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 193 completed 2024-12-08T00:22:45,549 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 237), OpenFileDescriptor=445 (was 448), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=364 (was 357) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7502 (was 7531) 2024-12-08T00:22:45,549 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-08T00:22:45,549 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T00:22:45,549 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:62287 2024-12-08T00:22:45,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:45,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:22:45,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1173057206, stopped=false 2024-12-08T00:22:45,550 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=017dd09fb407,44717,1733617178577 2024-12-08T00:22:45,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:22:45,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:22:45,551 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-08T00:22:45,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:22:45,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:22:45,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:45,552 INFO 
[Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '017dd09fb407,36703,1733617179335' ***** 2024-12-08T00:22:45,552 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-08T00:22:45,552 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:22:45,552 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:22:45,552 INFO [RS:0;017dd09fb407:36703 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:22:45,552 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:22:45,553 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(3579): Received CLOSE for 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1224): stopping server 017dd09fb407,36703,1733617179335 2024-12-08T00:22:45,553 DEBUG [RS:0;017dd09fb407:36703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:22:45,553 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4a133571fbb9d65d8cbb8c5be599e94a, disabling compactions & flushes 2024-12-08T00:22:45,554 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. after waiting 0 ms 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 
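Editor's note: the shutdown signal in the entries above is simply the deletion of the `/hbase/running` znode; every `ZKWatcher` receives a `NodeDeleted` event and then re-arms a watch on a znode that no longer exists ("Set watcher on znode that does not yet exist"). Below is a minimal sketch of that watch-a-possibly-missing-znode pattern with the plain ZooKeeper client; the quorum address comes from the log, the timeout and class name are assumptions.

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of the pattern behind "Received ZooKeeper Event, type=NodeDeleted, path=/hbase/running"
// and "Set watcher on znode that does not yet exist": exists() answers "is the cluster up?"
// and, with watch=true, arms a one-shot watch that fires on creation or deletion of the znode.
public class RunningNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch shutdownSeen = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62287", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        shutdownSeen.countDown();  // cluster shutdown was requested
      }
    });
    // exists() returns null when the znode is absent and still arms the default watcher,
    // which is exactly the "watch a znode that does not yet exist" case in the log.
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("/hbase/running absent: cluster not running (or already stopping)");
    }
    shutdownSeen.await();
    zk.close();
  }
}
```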
2024-12-08T00:22:45,554 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4a133571fbb9d65d8cbb8c5be599e94a 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-08T00:22:45,554 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-08T00:22:45,554 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1603): Online Regions={4a133571fbb9d65d8cbb8c5be599e94a=hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:22:45,554 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:22:45,554 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:22:45,554 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-08T00:22:45,558 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:22:45,570 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/.tmp/info/b4a0a6d41d2e4f3ba514361e781b6247 is 45, key is default/info:d/1733617183594/Put/seqid=0 2024-12-08T00:22:45,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742623_1799 (size=5037) 2024-12-08T00:22:45,585 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/info/0c6b0531bd284d5baf784df5a7ccad6d is 143, key is hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a./info:regioninfo/1733617183436/Put/seqid=0 2024-12-08T00:22:45,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742624_1800 (size=7725) 2024-12-08T00:22:45,647 INFO [regionserver/017dd09fb407:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:22:45,758 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:22:45,958 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4a133571fbb9d65d8cbb8c5be599e94a 2024-12-08T00:22:45,975 INFO 
[RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/.tmp/info/b4a0a6d41d2e4f3ba514361e781b6247 2024-12-08T00:22:45,978 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/.tmp/info/b4a0a6d41d2e4f3ba514361e781b6247 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/info/b4a0a6d41d2e4f3ba514361e781b6247 2024-12-08T00:22:45,980 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/info/b4a0a6d41d2e4f3ba514361e781b6247, entries=2, sequenceid=6, filesize=4.9 K 2024-12-08T00:22:45,981 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4a133571fbb9d65d8cbb8c5be599e94a in 427ms, sequenceid=6, compaction requested=false 2024-12-08T00:22:45,984 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/namespace/4a133571fbb9d65d8cbb8c5be599e94a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T00:22:45,984 INFO [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 2024-12-08T00:22:45,984 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4a133571fbb9d65d8cbb8c5be599e94a: 2024-12-08T00:22:45,984 DEBUG [RS_CLOSE_REGION-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733617182132.4a133571fbb9d65d8cbb8c5be599e94a. 
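Editor's note: the flush above commits its output with a write-to-temp-then-rename step ("Committing .../.tmp/info/<file> as .../info/<file>"), so readers only ever see fully written files. The sketch below shows that publish pattern with the plain Hadoop FileSystem API on the local filesystem; it is not HBase's `HRegionFileSystem`, and the payload, directory, and class name are illustrative assumptions.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-then-rename publish step logged above: write the new file under
// <region>/.tmp/<family>/, then rename it into <region>/<family>/ so it appears atomically.
public class TmpCommitSketch {

  static Path writeAndCommit(FileSystem fs, Path regionDir, String family, String fileName,
      byte[] payload) throws IOException {
    Path tmp = new Path(regionDir, ".tmp/" + family + "/" + fileName);
    Path dst = new Path(regionDir, family + "/" + fileName);
    fs.mkdirs(tmp.getParent());
    fs.mkdirs(dst.getParent());
    try (FSDataOutputStream out = fs.create(tmp, true)) {  // write the temp file first
      out.write(payload);
    }
    if (!fs.rename(tmp, dst)) {                            // atomic publish into the store dir
      throw new IOException("Commit failed: " + tmp + " -> " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);             // local FS for the sketch
    Path regionDir = new Path("/tmp/flush-sketch/region"); // hypothetical region directory
    Path committed = writeAndCommit(fs, regionDir, "info", "b4a0a6d41d2e4f3ba514361e781b6247",
        "not a real HFile, just a payload for the sketch".getBytes(StandardCharsets.UTF_8));
    System.out.println("Committed " + committed);
  }
}
```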
2024-12-08T00:22:45,989 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/info/0c6b0531bd284d5baf784df5a7ccad6d 2024-12-08T00:22:46,006 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/rep_barrier/2c466a2d57844795a37dd1dac6c8289d is 102, key is TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce./rep_barrier:/1733617214213/DeleteFamily/seqid=0 2024-12-08T00:22:46,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742625_1801 (size=6025) 2024-12-08T00:22:46,158 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T00:22:46,220 INFO [regionserver/017dd09fb407:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:22:46,220 INFO [regionserver/017dd09fb407:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:22:46,359 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T00:22:46,409 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/rep_barrier/2c466a2d57844795a37dd1dac6c8289d 2024-12-08T00:22:46,427 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/table/e77631fd2db3429da6dc75da64b4ea63 is 96, key is TestAcidGuarantees,,1733617183825.f51bdc360ee4fbe2f9447c9b6b4bf1ce./table:/1733617214213/DeleteFamily/seqid=0 2024-12-08T00:22:46,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742626_1802 (size=5942) 2024-12-08T00:22:46,559 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-08T00:22:46,559 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:22:46,559 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T00:22:46,759 DEBUG [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T00:22:46,831 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/table/e77631fd2db3429da6dc75da64b4ea63 2024-12-08T00:22:46,834 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/info/0c6b0531bd284d5baf784df5a7ccad6d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/info/0c6b0531bd284d5baf784df5a7ccad6d
2024-12-08T00:22:46,836 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/info/0c6b0531bd284d5baf784df5a7ccad6d, entries=22, sequenceid=93, filesize=7.5 K
2024-12-08T00:22:46,837 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/rep_barrier/2c466a2d57844795a37dd1dac6c8289d as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/rep_barrier/2c466a2d57844795a37dd1dac6c8289d
2024-12-08T00:22:46,839 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/rep_barrier/2c466a2d57844795a37dd1dac6c8289d, entries=6, sequenceid=93, filesize=5.9 K
2024-12-08T00:22:46,840 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/.tmp/table/e77631fd2db3429da6dc75da64b4ea63 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/table/e77631fd2db3429da6dc75da64b4ea63
2024-12-08T00:22:46,842 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/table/e77631fd2db3429da6dc75da64b4ea63, entries=9, sequenceid=93, filesize=5.8 K
2024-12-08T00:22:46,843 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1289ms, sequenceid=93, compaction requested=false
2024-12-08T00:22:46,846 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-12-08T00:22:46,847 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-08T00:22:46,847 INFO [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-08T00:22:46,847 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-08T00:22:46,847 DEBUG [RS_CLOSE_META-regionserver/017dd09fb407:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-08T00:22:46,959 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1250): stopping server 017dd09fb407,36703,1733617179335; all regions closed.
2024-12-08T00:22:46,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741834_1010 (size=26050)
2024-12-08T00:22:46,965 DEBUG [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/oldWALs
2024-12-08T00:22:46,965 INFO [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 017dd09fb407%2C36703%2C1733617179335.meta:.meta(num 1733617181888)
2024-12-08T00:22:46,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741833_1009 (size=17986347)
2024-12-08T00:22:46,968 DEBUG [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/oldWALs
2024-12-08T00:22:46,968 INFO [RS:0;017dd09fb407:36703 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 017dd09fb407%2C36703%2C1733617179335:(num 1733617181318)
2024-12-08T00:22:46,968 DEBUG [RS:0;017dd09fb407:36703 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T00:22:46,968 INFO [RS:0;017dd09fb407:36703 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T00:22:46,969 INFO [RS:0;017dd09fb407:36703 {}] hbase.ChoreService(370): Chore service for: regionserver/017dd09fb407:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-08T00:22:46,969 INFO [regionserver/017dd09fb407:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T00:22:46,969 INFO [RS:0;017dd09fb407:36703 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36703
2024-12-08T00:22:46,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/017dd09fb407,36703,1733617179335
2024-12-08T00:22:46,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T00:22:46,975 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [017dd09fb407,36703,1733617179335]
2024-12-08T00:22:46,975 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 017dd09fb407,36703,1733617179335; numProcessing=1
2024-12-08T00:22:46,976 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/017dd09fb407,36703,1733617179335 already deleted, retry=false
2024-12-08T00:22:46,976 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 017dd09fb407,36703,1733617179335 expired; onlineServers=0
2024-12-08T00:22:46,976 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '017dd09fb407,44717,1733617178577' *****
2024-12-08T00:22:46,976 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-08T00:22:46,976 DEBUG [M:0;017dd09fb407:44717 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d9287db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=017dd09fb407/172.17.0.2:0
2024-12-08T00:22:46,976 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegionServer(1224): stopping server 017dd09fb407,44717,1733617178577
2024-12-08T00:22:46,976 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegionServer(1250): stopping server 017dd09fb407,44717,1733617178577; all regions closed.
2024-12-08T00:22:46,976 DEBUG [M:0;017dd09fb407:44717 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T00:22:46,977 DEBUG [M:0;017dd09fb407:44717 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-08T00:22:46,977 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-08T00:22:46,977 DEBUG [M:0;017dd09fb407:44717 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-08T00:22:46,977 DEBUG [master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.large.0-1733617181048 {}] cleaner.HFileCleaner(306): Exit Thread[master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.large.0-1733617181048,5,FailOnTimeoutGroup]
2024-12-08T00:22:46,977 DEBUG [master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.small.0-1733617181049 {}] cleaner.HFileCleaner(306): Exit Thread[master/017dd09fb407:0:becomeActiveMaster-HFileCleaner.small.0-1733617181049,5,FailOnTimeoutGroup]
2024-12-08T00:22:46,977 INFO [M:0;017dd09fb407:44717 {}] hbase.ChoreService(370): Chore service for: master/017dd09fb407:0 had [] on shutdown
2024-12-08T00:22:46,977 DEBUG [M:0;017dd09fb407:44717 {}] master.HMaster(1733): Stopping service threads
2024-12-08T00:22:46,977 INFO [M:0;017dd09fb407:44717 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-08T00:22:46,977 ERROR [M:0;017dd09fb407:44717 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-12-08T00:22:46,978 INFO [M:0;017dd09fb407:44717 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-08T00:22:46,978 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-08T00:22:46,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-08T00:22:46,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T00:22:46,978 DEBUG [M:0;017dd09fb407:44717 {}] zookeeper.ZKUtil(347): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-08T00:22:46,978 WARN [M:0;017dd09fb407:44717 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-08T00:22:46,978 INFO [M:0;017dd09fb407:44717 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-08T00:22:46,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T00:22:46,978 INFO [M:0;017dd09fb407:44717 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-08T00:22:46,979 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-08T00:22:46,979 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:22:46,979 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:22:46,979 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-08T00:22:46,979 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:22:46,979 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=830.74 KB heapSize=1.00 MB
2024-12-08T00:22:46,993 DEBUG [M:0;017dd09fb407:44717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0902cc3fce894fda9cc8238ff8a9f5be is 82, key is hbase:meta,,1/info:regioninfo/1733617182024/Put/seqid=0
2024-12-08T00:22:46,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742627_1803 (size=5672)
2024-12-08T00:22:47,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T00:22:47,075 INFO [RS:0;017dd09fb407:36703 {}] regionserver.HRegionServer(1307): Exiting; stopping=017dd09fb407,36703,1733617179335; zookeeper connection closed.
2024-12-08T00:22:47,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36703-0x1006efe8d8b0001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T00:22:47,075 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6884ec63 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6884ec63
2024-12-08T00:22:47,076 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-08T00:22:47,397 INFO [M:0;017dd09fb407:44717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0902cc3fce894fda9cc8238ff8a9f5be
2024-12-08T00:22:47,416 DEBUG [M:0;017dd09fb407:44717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a938b85d7d54406ac5aff8d6a9b95b7 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\xA6/proc:d/1733617334926/Put/seqid=0
2024-12-08T00:22:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742628_1804 (size=47572)
2024-12-08T00:22:47,820 INFO [M:0;017dd09fb407:44717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=830.18 KB at sequenceid=2414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a938b85d7d54406ac5aff8d6a9b95b7
2024-12-08T00:22:47,823 INFO [M:0;017dd09fb407:44717 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a938b85d7d54406ac5aff8d6a9b95b7
2024-12-08T00:22:47,838 DEBUG [M:0;017dd09fb407:44717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb5a97e1e01f4a0c8f20acc8829de9db is 69, key is 017dd09fb407,36703,1733617179335/rs:state/1733617181080/Put/seqid=0
2024-12-08T00:22:47,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073742629_1805 (size=5156)
2024-12-08T00:22:48,242 INFO [M:0;017dd09fb407:44717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2414 (bloomFilter=true), to=hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb5a97e1e01f4a0c8f20acc8829de9db
2024-12-08T00:22:48,245 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0902cc3fce894fda9cc8238ff8a9f5be as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0902cc3fce894fda9cc8238ff8a9f5be
2024-12-08T00:22:48,247 INFO [M:0;017dd09fb407:44717 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0902cc3fce894fda9cc8238ff8a9f5be, entries=8, sequenceid=2414, filesize=5.5 K
2024-12-08T00:22:48,248 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a938b85d7d54406ac5aff8d6a9b95b7 as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a938b85d7d54406ac5aff8d6a9b95b7
2024-12-08T00:22:48,250 INFO [M:0;017dd09fb407:44717 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a938b85d7d54406ac5aff8d6a9b95b7
2024-12-08T00:22:48,250 INFO [M:0;017dd09fb407:44717 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a938b85d7d54406ac5aff8d6a9b95b7, entries=193, sequenceid=2414, filesize=46.5 K
2024-12-08T00:22:48,251 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb5a97e1e01f4a0c8f20acc8829de9db as hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb5a97e1e01f4a0c8f20acc8829de9db
2024-12-08T00:22:48,253 INFO [M:0;017dd09fb407:44717 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46183/user/jenkins/test-data/93e70c9c-b24c-e9d0-7b00-55d4825674b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb5a97e1e01f4a0c8f20acc8829de9db, entries=1, sequenceid=2414, filesize=5.0 K
2024-12-08T00:22:48,253 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegion(3040): Finished flush of dataSize ~830.74 KB/850678, heapSize ~1.00 MB/1050384, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1274ms, sequenceid=2414, compaction requested=false
2024-12-08T00:22:48,256 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:22:48,256 DEBUG [M:0;017dd09fb407:44717 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-08T00:22:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741830_1006 (size=1008883)
2024-12-08T00:22:48,258 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T00:22:48,258 INFO [M:0;017dd09fb407:44717 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-08T00:22:48,258 INFO [M:0;017dd09fb407:44717 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44717
2024-12-08T00:22:48,260 DEBUG [M:0;017dd09fb407:44717 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/017dd09fb407,44717,1733617178577 already deleted, retry=false
2024-12-08T00:22:48,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T00:22:48,361 INFO [M:0;017dd09fb407:44717 {}] regionserver.HRegionServer(1307): Exiting; stopping=017dd09fb407,44717,1733617178577; zookeeper connection closed.
2024-12-08T00:22:48,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44717-0x1006efe8d8b0000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T00:22:48,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T00:22:48,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T00:22:48,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T00:22:48,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T00:22:48,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.log.dir/,STOPPED}
2024-12-08T00:22:48,372 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-08T00:22:48,372 WARN [BP-1532074142-172.17.0.2-1733617175722 heartbeating to localhost/127.0.0.1:46183 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-08T00:22:48,372 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-08T00:22:48,372 WARN [BP-1532074142-172.17.0.2-1733617175722 heartbeating to localhost/127.0.0.1:46183 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1532074142-172.17.0.2-1733617175722 (Datanode Uuid ec2b63d5-2d4b-41e9-8e1d-ba3cd3db80f9) service to localhost/127.0.0.1:46183
2024-12-08T00:22:48,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data1/current/BP-1532074142-172.17.0.2-1733617175722 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T00:22:48,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/cluster_90f0d14f-daf9-4dbb-8963-b50f17f51d54/dfs/data/data2/current/BP-1532074142-172.17.0.2-1733617175722 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T00:22:48,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-08T00:22:48,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-08T00:22:48,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T00:22:48,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T00:22:48,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T00:22:48,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9fef9b88-fce3-0eb6-cbce-4bb545ae17d4/hadoop.log.dir/,STOPPED}
2024-12-08T00:22:48,404 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-08T00:22:48,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
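
The tail of this log (util.JVMClusterUtil reporting "Shutdown of 1 master(s) and 1 regionserver(s) complete", zookeeper.MiniZooKeeperCluster shutting down, and hbase.HBaseTestingUtility logging "Minicluster is down") is the teardown that an HBase test harness performs on its in-process mini cluster. The following is only a minimal sketch of that start/use/stop pattern, assuming HBase 2.x with the hbase-testing-util artifact (or the hbase-server test-jar) on the classpath; the class name MiniClusterSketch, the table "sketch", and the column family "f" are illustrative and do not appear in the log.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: start an in-process ZooKeeper + HDFS + HBase
// cluster, write one row, then tear everything down. The teardown is what
// emits lines like "Shutdown of N master(s) and N regionserver(s) complete"
// and "Minicluster is down" seen at the end of the log above.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();  // mini ZK quorum, mini DFS, one master, one regionserver
    try (Table table = util.createTable(
        TableName.valueOf("sketch"), Bytes.toBytes("f"))) {  // hypothetical table/family
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } finally {
      util.shutdownMiniCluster();  // stops HBase, then DFS, then the MiniZK cluster
    }
  }
}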